Compare commits
1 commit: rei/flake...release-v1

| Author | SHA1 | Date | |
|---|---|---|---|
| | 549e1acd05 | | |
@@ -1,141 +0,0 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel, if so rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """

    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with a ABI3 compatible wheel"""

    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    # HACK: it seems that some older versions of pip will consider a wheel marked
    # as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
    # here; there are some clues in
    #   https://github.com/pantsbuild/pants/pull/12857
    #   https://github.com/pypa/pip/issues/9138
    #   https://github.com/pypa/packaging/pull/319
    # Empirically this seems to work, note that macOS 11 and 10.16 are the same,
    # both versions are valid for backwards compatibility.
    platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
    abi3_tag = Tag(tag.interpreter, "abi3", platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""

    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect only a wheel file with only a single tag
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macos and need to use
        # `delocate-listdeps`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")

    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )

    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )

    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
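For context, the renaming step this wrapper performs can be exercised on its own with the same `packaging` helpers the script imports; the wheel filename below is hypothetical.

```python
# Minimal sketch of the abi3 renaming idea, assuming a hypothetical wheel filename.
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

filename = "matrix_synapse-1.0.0-cp37-cp37m-macosx_11_0_x86_64.whl"
_, version, _, tags = parse_wheel_filename(filename)
tag = next(iter(tags))

# Swap the ABI tag for abi3 and apply the macosx_11_0 -> macosx_10_16 workaround.
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
abi3_tag = Tag(tag.interpreter, "abi3", platform)

name = filename.split("-")[0]
print(f"{name}-{version}-{abi3_tag}.whl")
# matrix_synapse-1.0.0-cp37-abi3-macosx_10_16_x86_64.whl
```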
@@ -18,13 +18,6 @@
import json
import os


def set_output(key: str, value: str):
    # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
    with open(os.environ["GITHUB_OUTPUT"], "at") as f:
        print(f"{key}={value}", file=f)


IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
@@ -46,7 +39,7 @@ if not IS_PR:
            "database": "sqlite",
            "extras": "all",
        }
        for version in ("3.8", "3.9", "3.10", "3.11")
        for version in ("3.8", "3.9", "3.10")
    )


@@ -54,7 +47,7 @@ trial_postgres_tests = [
    {
        "python-version": "3.7",
        "database": "postgres",
        "postgres-version": "11",
        "postgres-version": "10",
        "extras": "all",
    }
]
@@ -62,9 +55,9 @@ trial_postgres_tests = [
if not IS_PR:
    trial_postgres_tests.append(
        {
            "python-version": "3.11",
            "python-version": "3.10",
            "database": "postgres",
            "postgres-version": "15",
            "postgres-version": "14",
            "extras": "all",
        }
    )
@@ -88,7 +81,7 @@ print("::endgroup::")
test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)
print(f"::set-output name=trial_test_matrix::{test_matrix}")


# First calculate the various sytest jobs.
@@ -109,26 +102,11 @@ sytest_tests = [
        "postgres": "multi-postgres",
        "workers": "workers",
    },
    {
        "sytest-tag": "focal",
        "postgres": "multi-postgres",
        "workers": "workers",
        "reactor": "asyncio",
    },
]

if not IS_PR:
    sytest_tests.extend(
        [
            {
                "sytest-tag": "focal",
                "reactor": "asyncio",
            },
            {
                "sytest-tag": "focal",
                "postgres": "postgres",
                "reactor": "asyncio",
            },
            {
                "sytest-tag": "testing",
                "postgres": "postgres",
@@ -147,4 +125,4 @@ print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")

test_matrix = json.dumps(sytest_tests)
set_output("sytest_test_matrix", test_matrix)
print(f"::set-output name=sytest_test_matrix::{test_matrix}")
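The `set_output` helper in this hunk replaces the deprecated `::set-output` workflow command with the `GITHUB_OUTPUT` file mechanism. A small local sketch, using a temporary file in place of the path GitHub Actions provides:

```python
# Sketch of the GITHUB_OUTPUT mechanism; a temp file stands in for the real path.
import json
import os
import tempfile

os.environ["GITHUB_OUTPUT"] = tempfile.mkstemp()[1]


def set_output(key: str, value: str) -> None:
    # Append "key=value" to the file GitHub Actions reads step outputs from.
    with open(os.environ["GITHUB_OUTPUT"], "at") as f:
        print(f"{key}={value}", file=f)


matrix = json.dumps([{"python-version": "3.8", "database": "sqlite", "extras": "all"}])
set_output("trial_test_matrix", matrix)

with open(os.environ["GITHUB_OUTPUT"]) as f:
    print(f.read(), end="")  # trial_test_matrix=[{"python-version": "3.8", ...}]
```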
@@ -1,23 +0,0 @@
#! /usr/bin/env python
import sys

if sys.version_info < (3, 11):
    raise RuntimeError("Requires at least Python 3.11, to import tomllib")

import tomllib

with open("poetry.lock", "rb") as f:
    lockfile = tomllib.load(f)

try:
    lock_version = lockfile["metadata"]["lock-version"]
    assert lock_version == "2.0"
except Exception:
    print(
        """\
Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
https://matrix-org.github.io/synapse/develop/development/dependencies.html
"""
    )
    raise
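The check above reads a real `poetry.lock` from disk; the same validation can be sketched against an in-memory TOML snippet (hypothetical contents) with `tomllib.loads`:

```python
# Sketch of the lock-version check against a hypothetical in-memory snippet.
import tomllib

snippet = """
[metadata]
lock-version = "2.0"
content-hash = "0000000000000000000000000000000000000000000000000000000000000000"
"""

lockfile = tomllib.loads(snippet)
assert lockfile["metadata"]["lock-version"] == "2.0"
print("poetry.lock metadata looks like lockfile format 2.0")
```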
@@ -35,9 +35,9 @@ sed -i \
# compatible (as far the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies and the dev-docs
# group from the toml file. This means we don't have to ensure compatibility between
# old deps and dev tools.
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.

pip install toml wheel

@@ -47,14 +47,13 @@ with open('pyproject.toml', 'r') as f:
    data = toml.loads(f.read())

del data['tool']['poetry']['dev-dependencies']
del data['tool']['poetry']['group']['dev-docs']

with open('pyproject.toml', 'w') as f:
    toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"

pip install poetry==1.3.2
pip install poetry==1.2.0
poetry lock

echo "::group::Patched pyproject.toml"
@@ -9,9 +9,19 @@ set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'

block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path

# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock

block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock

block Install custom gotestfmt template
@@ -23,9 +23,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-dat
  --output-directory /tmp/export_data

# Test that the output directory exists and contains the rooms directory
dir_r="/tmp/export_data/rooms"
dir_u="/tmp/export_data/user_data"
if [ -d "$dir_r" ] && [ -d "$dir_u" ]; then
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
  echo "Command successful, this test passes"
else
  echo "No output directories found, the command fails against a sqlite database."

@@ -44,9 +43,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-d
  --output-directory /tmp/export_data2

# Test that the output directory exists and contains the rooms directory
dir_r2="/tmp/export_data2/rooms"
dir_u2="/tmp/export_data2/user_data"
if [ -d "$dir_r2" ] && [ -d "$dir_u2" ]; then
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
  echo "Command successful, this test passes"
else
  echo "No output directories found, the command fails against a postgres database."
@@ -4,7 +4,7 @@
root = true

# 4 space indentation
[*.{py,pyi}]
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88
.flake8 (11 lines)
@@ -0,0 +1,11 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
#  W503: line break before binary operator
#  W504: line break after binary operator
#  E203: whitespace before ':' (which is contrary to pep8?)
#  E731: do not assign a lambda expression, use a def
#  E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501
@@ -21,8 +21,4 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
0a00b7ff14890987f09112a2ae696c61001e6cf1

# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7

# Update black to 23.1.0 (#15103)
9bb2eac71962970d02842bca441f4bcdbbf93a11

c4268e3da64f1abb5b31deaeb5769adb6510c0a7
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored, 45 lines)
@@ -74,36 +74,6 @@ body:
        - Debian packages from packages.matrix.org
        - pip (from PyPI)
        - Other (please mention below)
        - I don't know
    validations:
      required: true
  - type: input
    id: database
    attributes:
      label: Database
      description: |
        Are you using SQLite or PostgreSQL? What's the version of your database?

        If PostgreSQL, please also answer the following:
         - are you using a single PostgreSQL server
           or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
         - have you previously ported from SQLite using the Synapse "portdb" script?
         - have you previously restored from a backup?
    validations:
      required: true
  - type: dropdown
    id: workers
    attributes:
      label: Workers
      description: |
        Are you running a single Synapse process, or are you running
        [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
      options:
        - Single process
        - Multiple workers
        - I don't know
    validations:
      required: true
  - type: textarea
    id: platform
    attributes:
@@ -113,28 +83,17 @@ body:
        e.g. distro, hardware, if it's running in a vm/container, etc.
    validations:
      required: true
  - type: textarea
    id: config
    attributes:
      label: Configuration
      description: |
        Do you have any unusual config options turned on? If so, please provide details.

        - Experimental or undocumented features
        - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
        - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
        - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: |
        Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
        This will be automatically formatted into code, so there is no need for backticks (`\``).
        This will be automatically formatted into code, so there is no need for backticks.

        Please be careful to remove any personal or private data.

        **Bug reports are usually impossible to diagnose without logging.**
        **Bug reports are usually very difficult to diagnose without logging.**
      render: shell
    validations:
      required: true
.github/dependabot.yml (vendored, 6 lines)
@@ -15,9 +15,3 @@ updates:
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "cargo"
    directory: "/"
    versioning-strategy: "lockfile-only"
    schedule:
      interval: "weekly"
.github/workflows/dependabot_changelog.yml (vendored, 7 lines)
@@ -6,7 +6,7 @@ on:
      - reopened # For debugging!

permissions:
  # Needed to be able to push the commit. See
  # Needed to be able to push the commit. See
  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
  # for a similar example
  contents: write
@@ -20,11 +20,8 @@ jobs:
        with:
          ref: ${{ github.event.pull_request.head.ref }}
      - name: Write, commit and push changelog
        env:
          PR_TITLE: ${{ github.event.pull_request.title }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc
          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
          git add changelog.d
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "GitHub Actions"
.github/workflows/docker.yml (vendored, 14 lines)
@@ -10,7 +10,6 @@ on:

permissions:
  contents: read
  packages: write

jobs:
  build:
@@ -35,20 +34,11 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Log in to GHCR
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Calculate docker image tag
        id: set-tag
        uses: docker/metadata-action@master
        with:
          images: |
            docker.io/matrixdotorg/synapse
            ghcr.io/matrix-org/synapse
          images: matrixdotorg/synapse
          flavor: |
            latest=false
          tags: |
@@ -58,7 +48,7 @@ jobs:
            type=pep440,pattern={{raw}}

      - name: Build and push all platforms
        uses: docker/build-push-action@v4
        uses: docker/build-push-action@v3
        with:
          push: true
          labels: "gitsha1=${{ github.sha }}"
.github/workflows/docs-pr-netlify.yaml (vendored, 34 lines)
@@ -1,34 +0,0 @@
name: Deploy documentation PR preview

on:
  workflow_run:
    workflows: [ "Prepare documentation PR preview" ]
    types:
      - completed

jobs:
  netlify:
    if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
    runs-on: ubuntu-latest
    steps:
      # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
      # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
      - name: 📥 Download artifact
        uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
          name: book
          path: book

      - name: 📤 Deploy to Netlify
        uses: matrix-org/netlify-pr-preview@v1
        with:
          path: book
          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
          branch: ${{ github.event.workflow_run.head_branch }}
          revision: ${{ github.event.workflow_run.head_sha }}
          token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
          site_id: ${{ secrets.NETLIFY_SITE_ID }}
          desc: Documentation preview
          deployment_env: PR Documentation Preview
.github/workflows/docs-pr.yaml (vendored, 60 lines)
@@ -1,60 +0,0 @@
name: Prepare documentation PR preview

on:
  pull_request:
    paths:
      - docs/**
      - book.toml
      - .github/workflows/docs-pr.yaml

jobs:
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      - name: Upload Artifact
        uses: actions/upload-artifact@v3
        with:
          name: book
          path: book
          # We'll only use this in a workflow_run, then we're done with it
          retention-days: 1

  link-check:
    name: Check links in documentation
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

      - name: Setup htmltest
        run: |
          wget https://github.com/wjdp/htmltest/releases/download/v0.17.0/htmltest_0.17.0_linux_amd64.tar.gz
          echo '775c597ee74899d6002cd2d93076f897f4ba68686bceabe2e5d72e84c57bc0fb htmltest_0.17.0_linux_amd64.tar.gz' | sha256sum -c
          tar zxf htmltest_0.17.0_linux_amd64.tar.gz

      - name: Test links with htmltest
        # Build the book with `./` as the site URL (to make checks on 404.html possible)
        # Then run htmltest (without checking external links since that involves the network and is slow).
        run: |
          MDBOOK_OUTPUT__HTML__SITE_URL="./" mdbook build
          ./htmltest book --skip-external
.github/workflows/docs.yaml (vendored, 81 lines)
@@ -13,10 +13,25 @@ on:
  workflow_dispatch:

jobs:
  pre:
    name: Calculate variables for GitHub Pages deployment
  pages:
    name: GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

      # Figure out the target directory.
      #
      # The target directory depends on the name of the branch
@@ -39,66 +54,12 @@ jobs:
          esac

          # finally, set the 'branch-version' var.
          echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
    outputs:
      branch-version: ${{ steps.vars.outputs.branch-version }}

  ################################################################################
  pages-docs:
    name: GitHub Pages
    runs-on: ubuntu-latest
    needs:
      - pre
    steps:
      - uses: actions/checkout@v3

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
        with:
          mdbook-version: '0.4.17'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html

          echo "::set-output name=branch-version::$branch"

      # Deploy to the target directory.
      - name: Deploy to gh pages
        uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./book
          destination_dir: ./${{ needs.pre.outputs.branch-version }}

  ################################################################################
  pages-devdocs:
    name: GitHub Pages (developer docs)
    runs-on: ubuntu-latest
    needs:
      - pre
    steps:
      - uses: actions/checkout@v3

      - name: "Set up Sphinx"
        uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.3.2"
          groups: "dev-docs"
          extras: ""

      - name: Build the documentation
        run: |
          cd dev-docs
          poetry run make html

      # Deploy to the target directory.
      - name: Deploy to gh pages
        uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./dev-docs/_build/html
          destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
          destination_dir: ./${{ steps.vars.outputs.branch-version }}
.github/workflows/latest_deps.yml (vendored, 23 lines)
@@ -27,7 +27,10 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      # The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -35,7 +38,7 @@ jobs:
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.3.2"
          poetry-version: "1.2.0"
          extras: "all"
      # Dump installed versions for debugging.
      - run: poetry run pip list > before.txt
@@ -59,7 +62,10 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - run: sudo apt-get -qq install xmlsec1
@@ -70,7 +76,7 @@ jobs:
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v4
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
      - run: pip install .[all,test]
@@ -130,7 +136,10 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - name: Ensure sytest runs `pip install`
@@ -178,8 +187,6 @@ jobs:
        with:
          path: synapse

      - uses: actions/setup-go@v4

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

@@ -204,7 +211,7 @@ jobs:

    steps:
      - uses: actions/checkout@v3
      - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
.github/workflows/poetry_lockfile.yaml (vendored, 24 lines)
@@ -1,24 +0,0 @@
on:
  push:
    branches: ["develop", "release-*"]
    paths:
      - poetry.lock
  pull_request:
    paths:
      - poetry.lock

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check-sdists:
    name: "Check locked dependencies have sdists"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.x'
      - run: pip install tomli
      - run: ./scripts-dev/check_locked_deps_have_sdists.py
.github/workflows/push_complement_image.yml (vendored, 74 lines)
@@ -1,74 +0,0 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead

name: Store complement-synapse image in ghcr.io
on:
  push:
    branches: [ "master" ]
  schedule:
    - cron: '0 5 * * *'
  workflow_dispatch:
    inputs:
      branch:
        required: true
        default: 'develop'
        type: choice
        options:
          - develop
          - master

# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build and push complement image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout specific branch (debug build)
        uses: actions/checkout@v3
        if: github.event_name == 'workflow_dispatch'
        with:
          ref: ${{ inputs.branch }}
      - name: Checkout clean copy of develop (scheduled build)
        uses: actions/checkout@v3
        if: github.event_name == 'schedule'
        with:
          ref: develop
      - name: Checkout clean copy of master (on-push)
        uses: actions/checkout@v3
        if: github.event_name == 'push'
        with:
          ref: master
      - name: Login to registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Work out labels for complement image
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}/complement-synapse
          tags: |
            type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
            type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
            type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
            type=sha,format=long
      - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
        run: scripts-dev/complement.sh --build-only
      - name: Tag and push generated image
        run: |
          for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
            echo "tag and push $TAG"
            docker tag complement-synapse $TAG
            docker push $TAG
          done
.github/workflows/release-artifacts.yml (vendored, 58 lines)
@@ -4,15 +4,13 @@ name: Build release artifacts

on:
  # we build on PRs and develop to (hopefully) get early warning
  # of things breaking (but only build one set of debs). PRs skip
  # building wheels on macOS & ARM.
  # of things breaking (but only build one set of debs)
  pull_request:
  push:
    branches: ["develop", "release-*"]

    # we do the full build on tags.
    tags: ["v*"]
  merge_group:
  workflow_dispatch:

concurrency:
@@ -28,9 +26,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.x'
      - uses: actions/setup-python@v2
      - id: set-distros
        run: |
          # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -38,7 +34,7 @@ jobs:
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
          fi
          echo "distros=$dists" >> "$GITHUB_OUTPUT"
          echo "::set-output name=distros::$dists"
    # map the step outputs to job outputs
    outputs:
      distros: ${{ steps.set-distros.outputs.distros }}
@@ -73,9 +69,7 @@ jobs:
            ${{ runner.os }}-buildx-

      - name: Set up python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'
        uses: actions/setup-python@v2

      - name: Build the packages
        # see https://github.com/docker/build-push-action/issues/252
@@ -97,63 +91,39 @@ jobs:
          path: debs/*

  build-wheels:
    name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-20.04, macos-11]
        arch: [x86_64, aarch64]
        # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
        # It is not read by the rest of the workflow.
        os: [ubuntu-20.04, macos-10.15]
        is_pr:
          - ${{ startsWith(github.ref, 'refs/pull/') }}

        exclude:
          # Don't build macos wheels on PR CI.
          - is_pr: true
            os: "macos-11"
          # Don't build aarch64 wheels on mac.
          - os: "macos-11"
            arch: aarch64
          # Don't build aarch64 wheels on PR CI.
          - is_pr: true
            arch: aarch64
            os: "macos-10.15"

    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-python@v4
        with:
          # setup-python@v4 doesn't impose a default python version. Need to use 3.x
          # here, because `python` on osx points to Python 2.7.
          python-version: "3.x"
      - uses: actions/setup-python@v3

      - name: Install cibuildwheel
        run: python -m pip install cibuildwheel==2.9.0
        run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0

      - name: Set up QEMU to emulate aarch64
        if: matrix.arch == 'aarch64'
        uses: docker/setup-qemu-action@v2
        with:
          platforms: arm64

      - name: Build aarch64 wheels
        if: matrix.arch == 'aarch64'
        run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV

      - name: Only build a single wheel on PR
      # Only build a single wheel in CI.
      - name: Set env vars.
        run: |
          echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
        if: startsWith(github.ref, 'refs/pull/')
        run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

      - name: Build wheels
        run: python -m cibuildwheel --output-dir wheelhouse
        env:
          # Skip testing for platforms which various libraries don't have wheels
          # for, and so need extra build deps.
          CIBW_TEST_SKIP: pp3*-* *i686* *musl*
          # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
          CARGO_NET_GIT_FETCH_WITH_CLI: true
          CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
          CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*

      - uses: actions/upload-artifact@v3
        with:
.github/workflows/tests.yml (vendored, 209 lines)
@@ -4,7 +4,6 @@ on:
  push:
    branches: ["develop", "release-*"]
  pull_request:
  merge_group:
  workflow_dispatch:

concurrency:
@@ -28,19 +27,14 @@ jobs:
            rust:
              - 'rust/**'
              - 'Cargo.toml'
              - 'Cargo.lock'

  check-sampleconfig:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2
      - uses: actions/setup-python@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          poetry-version: "1.3.2"
          extras: "all"
      - run: poetry run scripts-dev/generate_sample_config.sh --check
      - run: poetry run scripts-dev/config-lint.sh
@@ -49,75 +43,14 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - uses: actions/setup-python@v2
      - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
      - run: scripts-dev/check_schema_delta.py --force-colors

  check-lockfile:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - run: .ci/scripts/check_lockfile.py

  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Setup Poetry
        uses: matrix-org/setup-python-poetry@v1
        with:
          install-project: "false"

      - name: Import order (isort)
        run: poetry run isort --check --diff .

      - name: Code style (black)
        run: poetry run black --check --diff .

      - name: Semantic checks (ruff)
        # --quiet suppresses the update check.
        run: poetry run ruff --quiet .

  lint-mypy:
    runs-on: ubuntu-latest
    name: Typechecking
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Setup Poetry
        uses: matrix-org/setup-python-poetry@v1
        with:
          # We want to make use of type hints in optional dependencies too.
          extras: all
          # We have seen odd mypy failures that were resolved when we started
          # installing the project again:
          # https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
          # To make CI green, err towards caution and install the project.
          install-project: "true"

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

      # NB: I have two concerns with this action:
      # 1. We occasionally see odd mypy problems that aren't reproducible
      #    locally with clean caches. I suspect some dodgy caching behaviour.
      # 2. The action uses GHA machinery that's deprecated
      #    (https://github.com/AustinScola/mypy-cache-github-action/issues/277)
      # It may be simpler to use actions/cache ourselves to restore .mypy_cache.
      - name: Restore/persist mypy's cache
        uses: AustinScola/mypy-cache-github-action@df56268388422ee282636ee2c7a9cc55ec644a41

      - name: Run mypy
        run: poetry run mypy
    uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
    with:
      typechecking-extras: "all"

  lint-crlf:
    runs-on: ubuntu-latest
@@ -134,9 +67,7 @@ jobs:
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - uses: actions/setup-python@v2
      - run: "pip install 'towncrier>=18.6.0rc1'"
      - run: scripts-dev/check-newsfragment.sh
        env:
@@ -148,12 +79,8 @@ jobs:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          poetry-version: "1.3.2"
          extras: "all"
      - run: poetry run scripts-dev/check_pydantic_models.py

@@ -166,31 +93,14 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.58.1
          override: true
          components: clippy
      - uses: Swatinem/rust-cache@v2

      - run: cargo clippy -- -D warnings

  # We also lint against a nightly rustc so that we can lint the benchmark
  # suite, which requires a nightly compiler.
  lint-clippy-nightly:
    runs-on: ubuntu-latest
    needs: changes
    if: ${{ needs.changes.outputs.rust == 'true' }}

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2022-12-01
          components: clippy
      - uses: Swatinem/rust-cache@v2

      - run: cargo clippy --all-features -- -D warnings
      - run: cargo clippy

  lint-rustfmt:
    runs-on: ubuntu-latest
@@ -201,11 +111,11 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@master
        uses: actions-rs/toolchain@v1
        with:
          # We use nightly so that it correctly groups together imports
          toolchain: nightly-2022-12-01
          components: rustfmt
          toolchain: 1.58.1
          override: true
          components: rustfmt
      - uses: Swatinem/rust-cache@v2

      - run: cargo fmt --check
@@ -215,13 +125,11 @@ jobs:
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    needs:
      - lint
      - lint-mypy
      - lint-crlf
      - lint-newsfile
      - lint-pydantic
      - check-sampleconfig
      - check-schema-delta
      - check-lockfile
      - lint-clippy
      - lint-rustfmt
    runs-on: ubuntu-latest
@@ -234,9 +142,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.x"
      - uses: actions/setup-python@v2
      - id: get-matrix
        run: .ci/scripts/calculate_jobs.py
    outputs:
@@ -256,33 +162,23 @@ jobs:
      - run: sudo apt-get -qq install xmlsec1
      - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
        if: ${{ matrix.job.postgres-version }}
        # 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
        # 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
        run: |
          docker run -d -p 5432:5432 \
            --tmpfs /var/lib/postgres:rw,size=6144m \
            --mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
            -e POSTGRES_PASSWORD=postgres \
            -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
            postgres:${{ matrix.job.postgres-version }}

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.job.python-version }}
          poetry-version: "1.3.2"
          extras: ${{ matrix.job.extras }}
      - name: Await PostgreSQL
        if: ${{ matrix.job.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: poetry run trial --jobs=6 tests
      - run: poetry run trial --jobs=2 tests
        env:
          SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: /var/run/postgresql
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
@@ -307,7 +203,10 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.58.1
          override: true
      - uses: Swatinem/rust-cache@v2

      # There aren't wheels for some of the older deps, so we need to install
@@ -344,10 +243,9 @@ jobs:
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: '3.7'
          poetry-version: "1.3.2"
          extras: "all test"

      - run: poetry run trial -j6 tests
      - run: poetry run trial -j2 tests
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
@@ -379,7 +277,6 @@ jobs:
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: "1.3.2"
          extras: ${{ matrix.extras }}
      - run: poetry run trial --jobs=2 tests
      - name: Dump logs
@@ -407,7 +304,6 @@ jobs:
      SYTEST_BRANCH: ${{ github.head_ref }}
      POSTGRES: ${{ matrix.job.postgres && 1}}
      MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
      ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }}
      WORKERS: ${{ matrix.job.workers && 1 }}
      BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
      TOP: ${{ github.workspace }}
@@ -423,7 +319,10 @@ jobs:
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.58.1
          override: true
      - uses: Swatinem/rust-cache@v2

      - name: Run SyTest
@@ -467,7 +366,6 @@ jobs:
      - run: sudo apt-get -qq install xmlsec1 postgresql-client
      - uses: matrix-org/setup-python-poetry@v1
        with:
          poetry-version: "1.3.2"
          extras: "postgres"
      - run: .ci/scripts/test_export_data_command.sh
        env:
@@ -485,10 +383,10 @@ jobs:
      matrix:
        include:
          - python-version: "3.7"
            postgres-version: "11"
            postgres-version: "10"

          - python-version: "3.11"
            postgres-version: "15"
          - python-version: "3.10"
            postgres-version: "14"

    services:
      postgres:
@@ -506,20 +404,10 @@ jobs:

    steps:
      - uses: actions/checkout@v3
      - name: Add PostgreSQL apt repository
        # We need a version of pg_dump that can handle the version of
        # PostgreSQL being tested against. The Ubuntu package repository lags
        # behind new releases, so we have to use the PostreSQL apt repository.
        # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
        run: |
          sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
          wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
          sudo apt-get update
      - run: sudo apt-get -qq install xmlsec1 postgresql-client
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: "1.3.2"
          extras: "postgres"
      - run: .ci/scripts/test_synapse_port_db.sh
        id: run_tester_script
@@ -563,21 +451,19 @@ jobs:
          path: synapse

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.58.1
          override: true
      - uses: Swatinem/rust-cache@v2

      - uses: actions/setup-go@v4

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

      - run: |
          set -o pipefail
          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
          POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
        shell: bash
        env:
          POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
          WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
        name: Run Complement Tests

  cargo-test:
@@ -591,31 +477,14 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.58.1
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.58.1
          override: true
      - uses: Swatinem/rust-cache@v2

      - run: cargo test

  # We want to ensure that the cargo benchmarks still compile, which requires a
  # nightly compiler.
  cargo-bench:
    if: ${{ needs.changes.outputs.rust == 'true' }}
    runs-on: ubuntu-latest
    needs:
      - linting-done
      - changes

    steps:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2022-12-01
      - uses: Swatinem/rust-cache@v2

      - run: cargo bench --no-run

  # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
  tests-done:
    if: ${{ always() }}
@@ -627,7 +496,6 @@ jobs:
      - portdb
      - complement
      - cargo-test
      - cargo-bench
    runs-on: ubuntu-latest
    steps:
      - uses: matrix-org/done-action@v2
@@ -639,4 +507,3 @@ jobs:
          skippable: |
            lint-newsfile
            cargo-test
            cargo-bench
.github/workflows/triage-incoming.yml (vendored, 29 lines)
@@ -5,11 +5,24 @@ on:
    types: [ opened ]

jobs:
  triage:
    uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2
    with:
      project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
      content_id: ${{ github.event.issue.node_id }}
    secrets:
      github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}

  add_new_issues:
    name: Add new issues to the triage board
    runs-on: ubuntu-latest
    steps:
      - uses: octokit/graphql-action@v2.x
        id: add_to_project
        with:
          headers: '{"GraphQL-Features": "projects_next_graphql"}'
          query: |
            mutation add_to_project($projectid:ID!,$contentid:ID!) {
              addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
                item {
                  id
                }
              }
            }
          projectid: ${{ env.PROJECT_ID }}
          contentid: ${{ github.event.issue.node_id }}
        env:
          PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
.github/workflows/triage_labelled.yml (vendored, 54 lines)
@@ -11,34 +11,34 @@ jobs:
    if: >
      contains(github.event.issue.labels.*.name, 'X-Needs-Info')
    steps:
      - uses: actions/add-to-project@main
        id: add_project
      - uses: octokit/graphql-action@v2.x
        id: add_to_project
        with:
          project-url: "https://github.com/orgs/matrix-org/projects/67"
          github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
      - name: Set status
        env:
          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
        run: |
          gh api graphql -f query='
            mutation(
              $project: ID!
              $item: ID!
              $fieldid: ID!
              $columnid: String!
            ) {
              updateProjectV2ItemFieldValue(
                input: {
                  projectId: $project
                  itemId: $item
                  fieldId: $fieldid
                  value: {
                    singleSelectOptionId: $columnid
          headers: '{"GraphQL-Features": "projects_next_graphql"}'
          query: |
            mutation {
              updateProjectV2ItemFieldValue(
                input: {
                  projectId: $projectid
                  itemId: $contentid
                  fieldId: $fieldid
                  value: {
                    singleSelectOptionId: "Todo"
                  }
                }
              ) {
                projectV2Item {
                  id
                }
              ) {
                projectV2Item {
                  id
                }
              }
            }
          }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent

          projectid: ${{ env.PROJECT_ID }}
          contentid: ${{ github.event.issue.node_id }}
          fieldid: ${{ env.FIELD_ID }}
          optionid: ${{ env.OPTION_ID }}
        env:
          PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
          FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
          OPTION_ID: "ba22e43c"
.github/workflows/twisted_trunk.yml (vendored, 31 lines)
@@ -5,13 +5,6 @@ on:
    - cron: 0 8 * * *

  workflow_dispatch:
    inputs:
      twisted_ref:
        description: Commit, branch or tag to checkout from upstream Twisted.
        required: false
        default: 'trunk'
        type: string


concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +18,10 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1
@@ -34,7 +30,7 @@ jobs:
          extras: "all"
      - run: |
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
@@ -48,7 +44,10 @@ jobs:
      - run: sudo apt-get -qq install xmlsec1

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1
@@ -85,7 +84,10 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - uses: Swatinem/rust-cache@v2

      - name: Patch dependencies
@@ -141,8 +143,6 @@ jobs:
        with:
          path: synapse

      - uses: actions/setup-go@v4

      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

@@ -151,11 +151,12 @@ jobs:
        run: |
          set -x
          DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
          pipx install poetry==1.3.2
          pipx install poetry==1.1.14

          poetry remove -n twisted
          poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry lock --no-update
          # NOT IN 1.1.14 poetry lock --check
        working-directory: synapse

      - run: |
@@ -177,7 +178,7 @@ jobs:

    steps:
      - uses: actions/checkout@v3
      - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
.gitignore (vendored, 8 lines)
@@ -18,7 +18,6 @@ __pycache__/
# We do want the poetry and cargo lockfile.
!poetry.lock
!Cargo.lock
!flake.lock

# stuff that is likely to exist when you run a server locally
/*.db
@@ -37,7 +36,6 @@ __pycache__/

# For direnv users
/.envrc
.direnv/

# IDEs
/.idea/
@@ -54,7 +52,6 @@ __pycache__/
/coverage.*
/dist/
/docs/build/
/dev-docs/_build/
/htmlcov
/pip-wheel-metadata/

@@ -63,7 +60,7 @@ book/

# complement
/complement-*
/main.tar.gz
/master.tar.gz

# rust
/target/
@@ -71,6 +68,3 @@ book/

# Poetry will create a setup.py, which we don't want to include.
/setup.py

# Don't include users' poetry configs
/poetry.toml
1296
CHANGES.md
File diff suppressed because it is too large
105
Cargo.lock
generated
@@ -13,9 +13,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.70"
|
||||
version = "1.0.65"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
|
||||
checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "blake2"
|
||||
version = "0.10.6"
|
||||
version = "0.10.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
|
||||
checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
|
||||
dependencies = [
|
||||
"digest",
|
||||
]
|
||||
@@ -104,9 +104,9 @@ checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.4"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
|
||||
checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
@@ -116,15 +116,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.135"
|
||||
version = "0.2.132"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
|
||||
checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.9"
|
||||
version = "0.4.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
|
||||
checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"scopeguard",
|
||||
@@ -156,9 +156,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.15.0"
|
||||
version = "1.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
|
||||
checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e"
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
@@ -185,18 +185,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.52"
|
||||
version = "1.0.43"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
|
||||
checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyo3"
|
||||
version = "0.17.3"
|
||||
version = "0.17.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
|
||||
checksum = "12f72538a0230791398a0986a6518ebd88abc3fded89007b506ed072acc831e1"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"cfg-if",
|
||||
@@ -212,9 +212,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pyo3-build-config"
|
||||
version = "0.17.3"
|
||||
version = "0.17.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
|
||||
checksum = "fc4cf18c20f4f09995f3554e6bcf9b09bd5e4d6b67c562fdfaafa644526ba479"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
"target-lexicon",
|
||||
@@ -222,9 +222,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pyo3-ffi"
|
||||
version = "0.17.3"
|
||||
version = "0.17.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
|
||||
checksum = "a41877f28d8ebd600b6aa21a17b40c3b0fc4dfe73a27b6e81ab3d895e401b0e9"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"pyo3-build-config",
|
||||
@@ -232,9 +232,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pyo3-log"
|
||||
version = "0.8.1"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
|
||||
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"log",
|
||||
@@ -243,25 +243,25 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pyo3-macros"
|
||||
version = "0.17.3"
|
||||
version = "0.17.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
|
||||
checksum = "2e81c8d4bcc2f216dc1b665412df35e46d12ee8d3d046b381aad05f1fcf30547"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"pyo3-macros-backend",
|
||||
"quote",
|
||||
"syn 1.0.104",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyo3-macros-backend"
|
||||
version = "0.17.3"
|
||||
version = "0.17.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
|
||||
checksum = "85752a767ee19399a78272cc2ab625cd7d373b2e112b4b13db28de71fa892784"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.104",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -276,9 +276,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.26"
|
||||
version = "1.0.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
|
||||
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
@@ -294,9 +294,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.7.3"
|
||||
version = "1.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
|
||||
checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
@@ -305,9 +305,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.29"
|
||||
version = "0.6.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
|
||||
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
@@ -323,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.160"
|
||||
version = "1.0.145"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c"
|
||||
checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.160"
|
||||
version = "1.0.145"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df"
|
||||
checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.10",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.96"
|
||||
version = "1.0.85"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
|
||||
checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"ryu",
|
||||
@@ -354,9 +354,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.10.0"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
|
||||
checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
|
||||
|
||||
[[package]]
|
||||
name = "subtle"
|
||||
@@ -366,20 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.104"
|
||||
version = "1.0.99"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
|
||||
checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -417,9 +406,9 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.5"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
|
||||
checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf"
|
||||
|
||||
[[package]]
|
||||
name = "unindent"
|
||||
|
||||
12
book.toml
@@ -34,6 +34,14 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"

[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"

@@ -1 +0,0 @@
Speedup tests by caching HomeServerConfig instances.
@@ -1 +0,0 @@
Fix a long-standing bug where cached key results which were directly fetched would not be properly re-used.
@@ -1 +0,0 @@
Always use multi-user device resync replication endpoints.
@@ -1 +0,0 @@
Add a Nix flake for use as a development environment.
@@ -1,28 +0,0 @@
# Schema symlinks

This directory contains symlinks to the latest dump of the postgres full schema. This is useful to have, as it allows IDEs to understand our schema and provide autocomplete, linters, inspections, etc.

In particular, the DataGrip functionality in IntelliJ's products seems to only consider files called `*.sql` when defining a schema from DDL; `*.sql.postgres` will be ignored. To get around this we symlink those files to ones ending in `.sql`. We've chosen to ignore the `.sql.sqlite` schema dumps here, as they're not intended for production use (and are much quicker to test against).

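A minimal Python sketch of that symlinking, assuming it is run from this directory and that the dump paths below exist (the mapping itself is illustrative, not the project's tooling):

```python
from pathlib import Path

# Illustrative link-name -> dump-path mapping; the real directory simply points
# each *.sql symlink at a *.sql.postgres schema dump.
TARGETS = {
    "full.sql": "../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres",
    "schema_version.sql": "../../synapse/storage/schema/common/schema_version.sql",
}

for link_name, target in TARGETS.items():
    link = Path(link_name)
    if link.is_symlink() or link.exists():
        link.unlink()          # drop a stale symlink before recreating it
    link.symlink_to(target)    # DataGrip now sees a plain *.sql file
    print(f"{link} -> {target}")
```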
## Example



## Caveats

- Doesn't include temporary tables created ad-hoc by Synapse.
- Postgres only. IDEs will likely be confused by SQLite-specific queries.
- Will not include migrations created after the latest schema dump.
- Symlinks might confuse checkouts on Windows systems.

## Instructions

### Jetbrains IDEs with DataGrip plugin

- View -> Tool Windows -> Database
- `+` Icon -> DDL Data Source
- Pick a name, e.g. `Synapse schema dump`
- Under sources, click `+`.
- Add an entry with Path pointing to this directory, and dialect set to PostgreSQL.
- OK, and OK.
- IDE should now be aware of the schema.
- Try control-clicking on a table name in a bit of SQL e.g. in `_get_forgotten_rooms_for_user_txn`.
@@ -1 +0,0 @@
../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres
Binary file not shown.
@@ -1 +0,0 @@
../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres
@@ -1 +0,0 @@
../../synapse/storage/schema/common/schema_version.sql
@@ -1 +0,0 @@
../../synapse/storage/schema/state/full_schemas/72/full.sql.postgres
@@ -68,7 +68,6 @@ redis:
enabled: true
host: redis
port: 6379
# dbid: <redis_logical_db_id>
# password: <secret_password>
```

@@ -95,6 +94,20 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```

### Add Workers to `instance_map`

Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:

```yaml
instance_map:
  synapse-generic-worker-1: # The worker_name setting in your worker configuration file
    host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
    port: 8034 # The port assigned to the replication listener in your worker config file
  synapse-federation-sender-1:
    host: synapse-federation-sender-1
    port: 8034
```

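For deployments with many workers it can be less error-prone to generate this block than to hand-edit it. A minimal Python sketch (the worker names and port are illustrative, mirroring the example above; PyYAML is assumed to be available):

```python
import yaml  # PyYAML, assumed available

# Illustrative inventory: worker_name -> port of its replication listener.
workers = {
    "synapse-generic-worker-1": 8034,
    "synapse-federation-sender-1": 8034,
}

# In this Docker Compose setup the host is simply the worker's service name.
instance_map = {name: {"host": name, "port": port} for name, port in workers.items()}

print(yaml.safe_dump({"instance_map": instance_map}, sort_keys=False))
```

Pasting the printed block into `homeserver.yaml` yields the same structure as the hand-written example.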
### Configure Federation Senders

This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
@@ -109,4 +122,4 @@ federation_sender_instances:

## Other Worker types

Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
@@ -5,4 +5,10 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]

worker_log_config: /data/federation_sender.log.config

@@ -6,6 +6,10 @@ worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]
  - type: http
    port: 8081
    x_forwarded: true

File diff suppressed because it is too large
@@ -1,47 +0,0 @@
# `lnav` config for Synapse logs

[lnav](https://lnav.org/) is a log-viewing tool. It is particularly useful when
you need to interleave multiple log files, or for exploring a large log file
with regex filters. The downside is that it is not as ubiquitous as tools like
`less`, `grep`, etc.

This directory contains an `lnav` [log format definition](
https://docs.lnav.org/en/v0.10.1/formats.html#defining-a-new-format
) for Synapse logs as
emitted by Synapse with the default [logging configuration](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#log_config
). It supports lnav 0.10.1 because that's what's packaged by my distribution.

This should allow lnav:

- to interpret timestamps, allowing log interleaving;
- to interpret log severity levels, allowing colouring by log level(!!!);
- to interpret request IDs, allowing you to skip through a specific request; and
- to highlight room, event and user IDs in logs.

See also https://gist.github.com/benje/e2ab750b0a81d11920d83af637d289f7 for a
similar example.

## Example

[](https://asciinema.org/a/556133)

## Tips

- `lnav -i /path/to/synapse/checkout/contrib/lnav/synapse-log-format.json`
- `lnav my_synapse_log_file` or `lnav synapse_log_files.*`, etc.
- `lnav --help` for CLI help.

Within lnav itself:

- `?` for help within lnav itself.
- `q` to quit.
- `/` to search a-la `less` and `vim`, then `n` and `N` to continue searching
  down and up.
- Use `o` and `O` to skip through logs based on the request ID (`POST-1234`, or
  else the value of the [`request_id_header`](
  https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=request_id_header#listeners
  ) header). This may get confused if the same request ID is repeated among
  multiple files or process restarts.
- ???
- Profit
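To sanity-check a log line against this format outside of lnav, a small Python sketch can be used. The regex mirrors the `log` pattern from the format definition below, adapted to Python's named-group syntax; the sample line is illustrative of the default logging output:

```python
import re

# Mirrors the `log` pattern from synapse-log-format.json (named groups use
# Python's (?P<name>...) syntax instead of PCRE's (?<name>...)).
LOG_PATTERN = re.compile(
    r".*(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) - "
    r"(?P<logger>.+) - (?P<lineno>\d+) - (?P<level>\w+) - "
    r"(?P<context>.+) - (?P<body>.*)"
)

sample = (
    "2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main - "
    "[database config 'master']: Checking database server"
)

match = LOG_PATTERN.match(sample)
if match:
    # lnav uses these same fields for timestamps, severity and request IDs.
    print(match.group("timestamp"), match.group("level"), match.group("context"))
```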
@@ -1,67 +0,0 @@
|
||||
{
|
||||
"$schema": "https://lnav.org/schemas/format-v1.schema.json",
|
||||
"synapse": {
|
||||
"title": "Synapse logs",
|
||||
"description": "Logs output by Synapse, a Matrix homesever, under its default logging config.",
|
||||
"regex": {
|
||||
"log": {
|
||||
"pattern": ".*(?<timestamp>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}) - (?<logger>.+) - (?<lineno>\\d+) - (?<level>\\w+) - (?<context>.+) - (?<body>.*)"
|
||||
}
|
||||
},
|
||||
"json": false,
|
||||
"timestamp-field": "timestamp",
|
||||
"timestamp-format": [
|
||||
"%Y-%m-%d %H:%M:%S,%L"
|
||||
],
|
||||
"level-field": "level",
|
||||
"body-field": "body",
|
||||
"opid-field": "context",
|
||||
"level": {
|
||||
"critical": "CRITICAL",
|
||||
"error": "ERROR",
|
||||
"warning": "WARNING",
|
||||
"info": "INFO",
|
||||
"debug": "DEBUG"
|
||||
},
|
||||
"sample": [
|
||||
{
|
||||
"line": "my-matrix-server-generic-worker-4 | 2023-01-27 09:47:09,818 - synapse.replication.tcp.client - 381 - ERROR - PUT-32992 - Timed out waiting for stream receipts",
|
||||
"level": "error"
|
||||
},
|
||||
{
|
||||
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
|
||||
"level": "warning"
|
||||
},
|
||||
{
|
||||
"line": "my-matrix-server | 2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main - [database config 'master']: Checking database server",
|
||||
"level": "info"
|
||||
},
|
||||
{
|
||||
"line": "my-matrix-server | 2023-01-26 15:08:40,447 - synapse.access.http.8008 - 460 - INFO - PUT-74929 - 0.0.0.0 - 8008 - {@alice:example.com} Processed request: 0.011sec/0.000sec (0.000sec, 0.000sec) (0.001sec/0.008sec/3) 2B 200 \"PUT /_matrix/client/r0/user/%40alice%3Atexample.com/account_data/im.vector.setting.breadcrumbs HTTP/1.0\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Element/1.11.20 Chrome/108.0.5359.179 Electron/22.0.3 Safari/537.36\" [0 dbevts]",
|
||||
"level": "info"
|
||||
}
|
||||
],
|
||||
"highlights": {
|
||||
"user_id": {
|
||||
"pattern": "(@|%40)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
|
||||
"underline": true
|
||||
},
|
||||
"room_id": {
|
||||
"pattern": "(!|%21)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
|
||||
"underline": true
|
||||
},
|
||||
"room_alias": {
|
||||
"pattern": "(#|%23)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
|
||||
"underline": true
|
||||
},
|
||||
"event_id_v1_v2": {
|
||||
"pattern": "(\\$|%25)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
|
||||
"underline": true
|
||||
},
|
||||
"event_id_v3_plus": {
|
||||
"pattern": "(\\$|%25)([A-Za-z0-9+/_]|-){43}",
|
||||
"underline": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -15,19 +15,19 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 808$i
    x_forwarded: true
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
#worker_pid_file: DATADIR/generic_worker$i.pid
EOF
done
```

This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.

Customise the script to your needs. Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
Customise the script to your needs.

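A rough Python equivalent of the shell loop above, for those who prefer it; the template mirrors only the fields shown in the snippet and the output directory is an assumption for illustration:

```python
from pathlib import Path

# Five generic workers, each with a unique worker_name and an HTTP listener on
# ports 8081-8085, written as one YAML file per worker.
TEMPLATE = """\
worker_name: generic_worker{i}
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 808{i}
    x_forwarded: true
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
"""

out_dir = Path("workers")  # hypothetical output location
out_dir.mkdir(exist_ok=True)
for i in range(1, 6):
    (out_dir / f"generic_worker{i}.yaml").write_text(TEMPLATE.format(i=i))
```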
@@ -8,9 +8,7 @@ It also prints out the example lines for Synapse main configuration file.

Remember to route necessary endpoints directly to a worker associated with it.

If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.

Hint: Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.

```sh
#!/bin/bash
@@ -48,11 +46,9 @@ worker_listeners:

  - type: http
    port: $(expr $HTTP_START_PORT + $i)
    x_forwarded: true
    resources:
      - names: [client]

#worker_pid_file: DATADIR/${STREAM_WRITERS[$i]}.pid
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
@@ -95,9 +91,7 @@ Simply run the script to create YAML files in the current folder and print out t

```console
$ ./create_stream_writers.sh
```
You should receive an output similar to the following:
```console

# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.

3
debian/build_virtualenv
vendored
@@ -31,11 +31,12 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
esac

# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.3.2
pip install poetry==1.2.0
poetry export \
    --extras all \
    --extras test \

214
debian/changelog
vendored
@@ -1,217 +1,3 @@
|
||||
matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.82.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Apr 2023 09:47:30 +0100
|
||||
|
||||
matrix-synapse-py3 (1.81.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.81.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Apr 2023 14:18:35 +0100
|
||||
|
||||
matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.81.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 06 Apr 2023 16:07:54 +0100
|
||||
|
||||
matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.81.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Apr 2023 14:29:03 +0100
|
||||
|
||||
matrix-synapse-py3 (1.80.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.80.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Mar 2023 11:10:33 +0100
|
||||
|
||||
matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.80.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 22 Mar 2023 08:30:16 -0700
|
||||
|
||||
matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.80.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Mar 2023 10:56:08 -0700
|
||||
|
||||
matrix-synapse-py3 (1.79.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.79.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Mar 2023 16:14:50 +0100
|
||||
|
||||
matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.79.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Mon, 13 Mar 2023 12:54:21 +0000
|
||||
|
||||
matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.79.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Mar 2023 12:03:49 +0000
|
||||
|
||||
matrix-synapse-py3 (1.78.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.78.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Feb 2023 08:56:03 -0800
|
||||
|
||||
matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium
|
||||
|
||||
* Add `matrix-org-archive-keyring` package as recommended.
|
||||
* New Synapse release 1.78.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Feb 2023 14:29:19 +0000
|
||||
|
||||
matrix-synapse-py3 (1.77.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.77.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Feb 2023 12:59:02 +0100
|
||||
|
||||
matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.77.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 10 Feb 2023 12:44:21 +0000
|
||||
|
||||
matrix-synapse-py3 (1.77.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.77.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Feb 2023 13:45:14 +0000
|
||||
|
||||
matrix-synapse-py3 (1.76.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.76.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 31 Jan 2023 08:21:47 -0800
|
||||
|
||||
matrix-synapse-py3 (1.76.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.76.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 27 Jan 2023 11:17:57 +0000
|
||||
|
||||
matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium
|
||||
|
||||
* Use Poetry 1.3.2 to manage the bundled virtualenv included with this package.
|
||||
* New Synapse release 1.76.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 25 Jan 2023 16:21:16 +0000
|
||||
|
||||
matrix-synapse-py3 (1.75.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.75.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 17 Jan 2023 11:36:02 +0000
|
||||
|
||||
matrix-synapse-py3 (1.75.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.75.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 12 Jan 2023 10:30:15 -0800
|
||||
|
||||
matrix-synapse-py3 (1.75.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.75.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 10 Jan 2023 12:18:27 +0000
|
||||
|
||||
matrix-synapse-py3 (1.74.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.74.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Dec 2022 16:07:38 +0000
|
||||
|
||||
matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium
|
||||
|
||||
* New dependency on libicu-dev to provide improved results for user
|
||||
search.
|
||||
* New Synapse release 1.74.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Dec 2022 13:30:01 +0000
|
||||
|
||||
matrix-synapse-py3 (1.73.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.73.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Dec 2022 11:48:56 +0000
|
||||
|
||||
matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.73.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 01 Dec 2022 10:02:19 +0000
|
||||
|
||||
matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.73.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Nov 2022 12:28:13 +0000
|
||||
|
||||
matrix-synapse-py3 (1.72.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.72.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Nov 2022 10:57:30 +0000
|
||||
|
||||
matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.72.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 16 Nov 2022 15:10:59 +0000
|
||||
|
||||
matrix-synapse-py3 (1.71.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.71.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Nov 2022 10:38:10 +0000
|
||||
|
||||
matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.71.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 04 Nov 2022 12:00:33 +0000
|
||||
|
||||
matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.71.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Nov 2022 12:10:17 +0000
|
||||
|
||||
matrix-synapse-py3 (1.70.1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.70.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 28 Oct 2022 12:10:21 +0100
|
||||
|
||||
matrix-synapse-py3 (1.70.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.70.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 26 Oct 2022 11:11:50 +0100
|
||||
|
||||
matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.70.0rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Oct 2022 10:59:47 +0100
|
||||
|
||||
matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.70.0rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 19 Oct 2022 14:11:57 +0100
|
||||
|
||||
matrix-synapse-py3 (1.69.0) stable; urgency=medium
|
||||
|
||||
* New Synapse release 1.69.0.
|
||||
|
||||
3
debian/control
vendored
@@ -8,8 +8,6 @@ Build-Depends:
 dh-virtualenv (>= 1.1),
 libsystemd-dev,
 libpq-dev,
 libicu-dev,
 pkg-config,
 lsb-release,
 python3-dev,
 python3,
@@ -37,7 +35,6 @@ Depends:
# so we put perl:Depends in Suggests rather than Depends.
Recommends:
 ${shlibs1:Recommends},
 matrix-org-archive-keyring,
Suggests:
 sqlite3,
 ${perl:Depends},

@@ -1,20 +0,0 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
@@ -1,50 +0,0 @@
|
||||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# For the full list of built-in configuration values, see the documentation:
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||
|
||||
project = "Synapse development"
|
||||
copyright = "2023, The Matrix.org Foundation C.I.C."
|
||||
author = "The Synapse Maintainers and Community"
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
||||
|
||||
extensions = [
|
||||
"autodoc2",
|
||||
"myst_parser",
|
||||
]
|
||||
|
||||
templates_path = ["_templates"]
|
||||
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
|
||||
|
||||
|
||||
# -- Options for Autodoc2 ----------------------------------------------------
|
||||
|
||||
autodoc2_docstring_parser_regexes = [
|
||||
# this will render all docstrings as 'MyST' Markdown
|
||||
(r".*", "myst"),
|
||||
]
|
||||
|
||||
autodoc2_packages = [
|
||||
{
|
||||
"path": "../synapse",
|
||||
# Don't render documentation for everything as a matter of course
|
||||
"auto_mode": False,
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
# -- Options for MyST (Markdown) ---------------------------------------------
|
||||
|
||||
# myst_heading_anchors = 2
|
||||
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
|
||||
|
||||
html_theme = "furo"
|
||||
html_static_path = ["_static"]
|
||||
@@ -1,22 +0,0 @@
|
||||
.. Synapse Developer Documentation documentation master file, created by
|
||||
sphinx-quickstart on Mon Mar 13 08:59:51 2023.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to the Synapse Developer Documentation!
|
||||
===========================================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
modules/federation_sender
|
||||
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
@@ -1,5 +0,0 @@
|
||||
Federation Sender
|
||||
=================
|
||||
|
||||
```{autodoc2-docstring} synapse.federation.sender
|
||||
```
|
||||
@@ -17,10 +17,16 @@
|
||||
|
||||
# Irritatingly, there is no blessed guide on how to distribute an application with its
|
||||
# poetry-managed environment in a docker image. We have opted for
|
||||
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
|
||||
# in `poetry export` in the past.
|
||||
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
|
||||
# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
|
||||
# In case we get bitten by those bugs in the future, the recommendations here might
|
||||
# be useful:
|
||||
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
|
||||
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
|
||||
|
||||
ARG PYTHON_VERSION=3.11
|
||||
|
||||
|
||||
ARG PYTHON_VERSION=3.9
|
||||
|
||||
###
|
||||
### Stage 0: generate requirements.txt
|
||||
@@ -34,31 +40,16 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
|
||||
# Here we use it to set up a cache for apt (and below for pip), to improve
|
||||
# rebuild speeds on slow connections.
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && apt-get install -yqq \
|
||||
build-essential curl git libffi-dev libssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install rust and ensure its in the PATH.
|
||||
# (Rust may be needed to compile `cryptography`---which is one of poetry's
|
||||
# dependencies---on platforms that don't have a `cryptography` wheel.
|
||||
ENV RUSTUP_HOME=/rust
|
||||
ENV CARGO_HOME=/cargo
|
||||
ENV PATH=/cargo/bin:/rust/bin:$PATH
|
||||
RUN mkdir /rust /cargo
|
||||
|
||||
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
|
||||
|
||||
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
|
||||
# set to true, so we expose it as a build-arg.
|
||||
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && apt-get install -yqq \
|
||||
build-essential cargo git libffi-dev libssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# We install poetry in its own build stage to avoid its dependencies conflicting with
|
||||
# synapse's dependencies.
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install --user "poetry==1.3.2"
|
||||
pip install --user "poetry==1.2.0"
|
||||
|
||||
WORKDIR /synapse
|
||||
|
||||
@@ -79,9 +70,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
|
||||
# Otherwise, just create an empty requirements file so that the Dockerfile can
|
||||
# proceed.
|
||||
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
|
||||
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
|
||||
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
|
||||
else \
|
||||
touch /synapse/requirements.txt; \
|
||||
touch /synapse/requirements.txt; \
|
||||
fi
|
||||
|
||||
###
|
||||
@@ -91,24 +82,22 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
|
||||
|
||||
# install the OS build deps
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && apt-get install -yqq \
|
||||
build-essential \
|
||||
libffi-dev \
|
||||
libjpeg-dev \
|
||||
libpq-dev \
|
||||
libssl-dev \
|
||||
libwebp-dev \
|
||||
libxml++2.6-dev \
|
||||
libxslt1-dev \
|
||||
openssl \
|
||||
zlib1g-dev \
|
||||
git \
|
||||
curl \
|
||||
libicu-dev \
|
||||
pkg-config \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && apt-get install -yqq \
|
||||
build-essential \
|
||||
libffi-dev \
|
||||
libjpeg-dev \
|
||||
libpq-dev \
|
||||
libssl-dev \
|
||||
libwebp-dev \
|
||||
libxml++2.6-dev \
|
||||
libxslt1-dev \
|
||||
openssl \
|
||||
zlib1g-dev \
|
||||
git \
|
||||
curl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
# Install rust and ensure its in the PATH
|
||||
@@ -117,7 +106,7 @@ ENV CARGO_HOME=/cargo
|
||||
ENV PATH=/cargo/bin:/rust/bin:$PATH
|
||||
RUN mkdir /rust /cargo
|
||||
|
||||
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
|
||||
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
|
||||
|
||||
|
||||
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
|
||||
@@ -146,12 +135,10 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
|
||||
# Install the synapse package itself.
|
||||
# If we have populated requirements.txt, we don't install any dependencies
|
||||
# as we should already have those from the previous `pip install` step.
|
||||
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
|
||||
--mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
|
||||
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
|
||||
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
|
||||
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
|
||||
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
|
||||
else \
|
||||
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
|
||||
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
|
||||
fi
|
||||
|
||||
###
|
||||
@@ -166,20 +153,19 @@ LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git
|
||||
LABEL org.opencontainers.image.licenses='Apache-2.0'
|
||||
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && apt-get install -yqq \
|
||||
curl \
|
||||
gosu \
|
||||
libjpeg62-turbo \
|
||||
libpq5 \
|
||||
libwebp6 \
|
||||
xmlsec1 \
|
||||
libjemalloc2 \
|
||||
libicu67 \
|
||||
libssl-dev \
|
||||
openssl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
curl \
|
||||
gosu \
|
||||
libjpeg62-turbo \
|
||||
libpq5 \
|
||||
libwebp6 \
|
||||
xmlsec1 \
|
||||
libjemalloc2 \
|
||||
libssl-dev \
|
||||
openssl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /install /usr/local
|
||||
COPY ./docker/start.py /start.py
|
||||
@@ -190,4 +176,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
|
||||
ENTRYPOINT ["/start.py"]
|
||||
|
||||
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
|
||||
@@ -36,10 +36,8 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
wget
|
||||
|
||||
# fetch and unpack the package
|
||||
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
|
||||
# Debian sid. TODO: Switch back to upstream once https://github.com/spotify/dh-virtualenv/pull/354 has merged.
|
||||
RUN mkdir /dh-virtualenv
|
||||
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/matrix-org/dh-virtualenv/archive/refs/tags/matrixorg-2023010302.tar.gz
|
||||
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz
|
||||
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
|
||||
|
||||
# install its build deps. We do another apt-cache-update here, because we might
|
||||
@@ -86,8 +84,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
|
||||
python3-venv \
|
||||
sqlite3 \
|
||||
libpq-dev \
|
||||
libicu-dev \
|
||||
pkg-config \
|
||||
xmlsec1
|
||||
|
||||
# Install rust and ensure it's in the PATH
|
||||
@@ -96,7 +92,7 @@ ENV CARGO_HOME=/cargo
|
||||
ENV PATH=/cargo/bin:/rust/bin:$PATH
|
||||
RUN mkdir /rust /cargo
|
||||
|
||||
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
|
||||
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
|
||||
|
||||
|
||||
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
ARG SYNAPSE_VERSION=latest
|
||||
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
|
||||
|
||||
# first of all, we create a base image with an nginx which we can copy into the
|
||||
# target image. For repeated rebuilds, this is much faster than apt installing
|
||||
@@ -24,7 +23,7 @@ FROM debian:bullseye-slim AS deps_base
|
||||
FROM redis:6-bullseye AS redis_base
|
||||
|
||||
# now build the final image, based on the the regular Synapse docker image
|
||||
FROM $FROM
|
||||
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
|
||||
|
||||
# Install supervisord with pip instead of apt, to avoid installing a second
|
||||
# copy of python.
|
||||
@@ -41,11 +40,7 @@ FROM $FROM
|
||||
COPY --from=deps_base /etc/nginx /etc/nginx
|
||||
RUN rm /etc/nginx/sites-enabled/default
|
||||
RUN mkdir /var/log/nginx /var/lib/nginx
|
||||
RUN chown www-data /var/lib/nginx
|
||||
|
||||
# have nginx log to stderr/out
|
||||
RUN ln -sf /dev/stdout /var/log/nginx/access.log
|
||||
RUN ln -sf /dev/stderr /var/log/nginx/error.log
|
||||
RUN chown www-data /var/log/nginx /var/lib/nginx
|
||||
|
||||
# Copy Synapse worker, nginx and supervisord configuration template files
|
||||
COPY ./docker/conf-workers/* /conf/
|
||||
|
||||
@@ -241,4 +241,4 @@ healthcheck:
|
||||
|
||||
Jemalloc is embedded in the image and will be used instead of the default allocator.
|
||||
You can read about jemalloc by reading the Synapse
|
||||
[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
|
||||
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
|
||||
|
||||
@@ -7,9 +7,8 @@
|
||||
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
|
||||
|
||||
ARG SYNAPSE_VERSION=latest
|
||||
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
|
||||
|
||||
FROM $FROM
|
||||
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
|
||||
# First of all, we copy postgres server from the official postgres image,
|
||||
# since for repeated rebuilds, this is much faster than apt installing
|
||||
# postgres each time.
|
||||
|
||||
@@ -6,7 +6,7 @@ set -e
|
||||
|
||||
echo "Complement Synapse launcher"
|
||||
echo " Args: $@"
|
||||
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
|
||||
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
|
||||
|
||||
function log {
|
||||
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
|
||||
@@ -45,13 +45,9 @@ esac
|
||||
|
||||
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
|
||||
# Specify the workers to test with
|
||||
# Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
|
||||
# utilizing WORKERS=1 for backwards compatibility.
|
||||
# -n True if the length of string is non-zero.
|
||||
# -z True if the length of string is zero.
|
||||
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
|
||||
export SYNAPSE_WORKER_TYPES="\
|
||||
event_persister:2, \
|
||||
export SYNAPSE_WORKER_TYPES="\
|
||||
event_persister, \
|
||||
event_persister, \
|
||||
background_worker, \
|
||||
frontend_proxy, \
|
||||
event_creator, \
|
||||
@@ -61,13 +57,9 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
|
||||
federation_reader, \
|
||||
federation_sender, \
|
||||
synchrotron, \
|
||||
client_reader, \
|
||||
appservice, \
|
||||
pusher, \
|
||||
stream_writers=account_data+presence+receipts+to_device+typing"
|
||||
pusher"
|
||||
|
||||
fi
|
||||
log "Workers requested: $SYNAPSE_WORKER_TYPES"
|
||||
# Improve startup times by using a launcher based on fork()
|
||||
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
|
||||
else
|
||||
@@ -76,17 +68,6 @@ else
|
||||
fi
|
||||
|
||||
|
||||
if [[ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]]; then
|
||||
if [[ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]]; then
|
||||
export SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR="1"
|
||||
else
|
||||
export SYNAPSE_ASYNC_IO_REACTOR="1"
|
||||
fi
|
||||
else
|
||||
export SYNAPSE_ASYNC_IO_REACTOR="0"
|
||||
fi
|
||||
|
||||
|
||||
# Add Complement's appservice registration directory, if there is one
|
||||
# (It can be absent when there are no application services in this test!)
|
||||
if [ -d /complement/appservice ]; then
|
||||
|
||||
@@ -12,8 +12,6 @@ trusted_key_servers: []
|
||||
enable_registration: true
|
||||
enable_registration_without_verification: true
|
||||
bcrypt_rounds: 4
|
||||
url_preview_enabled: true
|
||||
url_preview_ip_range_blacklist: []
|
||||
|
||||
## Registration ##
|
||||
|
||||
@@ -92,18 +90,18 @@ allow_device_name_lookup_over_federation: true
|
||||
## Experimental Features ##
|
||||
|
||||
experimental_features:
|
||||
# Enable spaces support
|
||||
spaces_enabled: true
|
||||
# Enable history backfilling support
|
||||
msc2716_enabled: true
|
||||
# server-side support for partial state in /send_join responses
|
||||
msc3706_enabled: true
|
||||
{% if not workers_in_use %}
|
||||
# client-side support for partial state in /send_join responses
|
||||
faster_joins: true
|
||||
# Enable support for polls
|
||||
msc3381_polls_enabled: true
|
||||
# Enable deleting device-specific notification settings stored in account data
|
||||
msc3890_enabled: true
|
||||
# Enable removing account data support
|
||||
msc3391_enabled: true
|
||||
# Filtering /messages by relation type.
|
||||
msc3874_enabled: true
|
||||
{% endif %}
|
||||
# Enable jump to date endpoint
|
||||
msc3030_enabled: true
|
||||
|
||||
server_notices:
|
||||
system_mxid_localpart: _server
|
||||
|
||||
@@ -19,15 +19,8 @@
# The environment variables it reads are:
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
#   below. Leave empty for no workers. Add a ':' and a number at the end to
#   multiply that worker. Append multiple worker types with '+' to merge the
#   worker types into a single worker. Add a name and a '=' to the front of a
#   worker type to give this instance a name in logs and nginx.
#   Examples:
#   SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
#   SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
#   SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
#   below. Leave empty for no workers, or set to '*' for all possible workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
#   will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
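To make the richer `SYNAPSE_WORKER_TYPES` syntax described above concrete, here is a minimal Python sketch of how such a value could be parsed; it is an illustration of the `':count'` multiplier only (it drops the optional `'name='` prefix and keeps a `'+'`-merged spec as a single key), not the script's actual implementation:

```python
from collections import Counter
from typing import Dict

def parse_worker_types(spec: str) -> Dict[str, int]:
    """Illustrative parser: 'event_persister:2, federation_sender' -> counts."""
    counts: Counter = Counter()
    for entry in spec.split(","):
        entry = entry.strip()
        if not entry:
            continue
        # Drop an optional 'instance_name=' prefix (name handling not modelled here).
        _, _, entry = entry.rpartition("=")
        # Split off an optional ':count' multiplier.
        worker, _, count = entry.partition(":")
        counts[worker.strip()] += int(count) if count else 1
    return dict(counts)

print(parse_worker_types("event_persister:2, federation_sender, client_reader"))
# {'event_persister': 2, 'federation_sender': 1, 'client_reader': 1}
```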
@@ -46,46 +39,23 @@
|
||||
# continue to work if so.
|
||||
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
List,
|
||||
Mapping,
|
||||
MutableMapping,
|
||||
NoReturn,
|
||||
Optional,
|
||||
Set,
|
||||
SupportsIndex,
|
||||
)
|
||||
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
|
||||
|
||||
import yaml
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
|
||||
|
||||
# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
|
||||
# during processing with the name of the worker.
|
||||
WORKER_PLACEHOLDER_NAME = "placeholder_name"
|
||||
|
||||
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
|
||||
# Watching /_matrix/client needs a "client" listener
|
||||
# Watching /_matrix/federation needs a "federation" listener
|
||||
# Watching /_matrix/media and related needs a "media" listener
|
||||
# Stream Writers require "client" and "replication" listeners because they
|
||||
# have to attach by instance_map to the master process and have client endpoints.
|
||||
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"pusher": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.pusher",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {},
|
||||
"shared_extra_conf": {"start_pushers": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"user_dir": {
|
||||
@@ -94,13 +64,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
|
||||
],
|
||||
"shared_extra_conf": {
|
||||
"update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
|
||||
},
|
||||
"shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"media_repository": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.media_repository",
|
||||
"listener_resources": ["media"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/media/",
|
||||
@@ -110,27 +78,21 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"^/_synapse/admin/v1/media/.*$",
|
||||
"^/_synapse/admin/v1/quarantine_media/.*$",
|
||||
],
|
||||
# The first configured media worker will run the media background jobs
|
||||
"shared_extra_conf": {
|
||||
"enable_media_repo": False,
|
||||
"media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
|
||||
},
|
||||
"shared_extra_conf": {"enable_media_repo": False},
|
||||
"worker_extra_conf": "enable_media_repo: true",
|
||||
},
|
||||
"appservice": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {
|
||||
"notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
|
||||
},
|
||||
"shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_sender": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.federation_sender",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {},
|
||||
"shared_extra_conf": {"send_federation": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"synchrotron": {
|
||||
@@ -145,40 +107,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"client_reader": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
|
||||
"^/_matrix/client/v1/rooms/.*/hierarchy$",
|
||||
"^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
|
||||
"^/_matrix/client/v1/rooms/.*/threads$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
|
||||
"^/_matrix/client/versions$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
|
||||
"^/_matrix/client/(r0|v3|unstable)/register$",
|
||||
"^/_matrix/client/(r0|v3|unstable)/register/available$",
|
||||
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
|
||||
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
|
||||
"^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
|
||||
"^/_matrix/client/(r0|v3|unstable)/password_policy$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
|
||||
"^/_matrix/client/(r0|v3|unstable)/capabilities$",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_reader": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["federation"],
|
||||
@@ -197,7 +125,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"^/_matrix/federation/(v1|v2)/invite/",
|
||||
"^/_matrix/federation/(v1|v2)/query_auth/",
|
||||
"^/_matrix/federation/(v1|v2)/event_auth/",
|
||||
"^/_matrix/federation/v1/timestamp_to_event/",
|
||||
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
|
||||
"^/_matrix/federation/(v1|v2)/user/devices/",
|
||||
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
|
||||
@@ -224,9 +151,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
# This worker cannot be sharded. Therefore, there should only ever be one
|
||||
# background worker. This is enforced for the safety of your database.
|
||||
"shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
|
||||
# This worker cannot be sharded. Therefore there should only ever be one background
|
||||
# worker, and it should be named background_worker1
|
||||
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"event_creator": {
|
||||
@@ -237,7 +164,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
|
||||
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
|
||||
],
|
||||
@@ -245,54 +171,14 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"frontend_proxy": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"app": "synapse.app.frontend_proxy",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"account_data": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(r0|v3|unstable)/.*/tags",
|
||||
"^/_matrix/client/(r0|v3|unstable)/.*/account_data",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"presence": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"receipts": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
|
||||
"^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"to_device": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"typing": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
"worker_extra_conf": (
|
||||
"worker_main_http_uri: http://127.0.0.1:%d"
|
||||
% (MAIN_PROCESS_HTTP_LISTENER_PORT,)
|
||||
),
|
||||
},
|
||||
}
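As an illustrative aside (not part of the diff), the endpoint_patterns in the templates above are ordinary Python regexes anchored at the start of the request path. A minimal sketch, using one of the client_reader patterns and a made-up room ID:

```python
# Sketch only: check one of the client_reader patterns above against a request
# path. The room ID is hypothetical; nginx does the real matching in practice.
import re

pattern = r"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$"
path = "/_matrix/client/v3/rooms/!abc:example.org/members"
print(bool(re.match(pattern, path)))  # True
```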
@@ -307,7 +193,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
|
||||
"""
|
||||
|
||||
NGINX_UPSTREAM_CONFIG_BLOCK = """
|
||||
upstream {upstream_worker_base_name} {{
|
||||
upstream {upstream_worker_type} {{
|
||||
{body}
|
||||
}}
|
||||
"""
@@ -315,19 +201,24 @@ upstream {upstream_worker_base_name} {{
|
||||
|
||||
# Utility functions
|
||||
def log(txt: str) -> None:
|
||||
"""Log something to the stdout.
|
||||
|
||||
Args:
|
||||
txt: The text to log.
|
||||
"""
|
||||
print(txt)
|
||||
|
||||
|
||||
def error(txt: str) -> NoReturn:
|
||||
print(txt, file=sys.stderr)
|
||||
"""Log something and exit with an error code.
|
||||
|
||||
Args:
|
||||
txt: The text to log in error.
|
||||
"""
|
||||
log(txt)
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
def flush_buffers() -> None:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
|
||||
def convert(src: str, dst: str, **template_vars: object) -> None:
|
||||
"""Generate a file from a template
|
||||
|
||||
@@ -356,46 +247,32 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
|
||||
outfile.write(rendered)
|
||||
|
||||
|
||||
def add_worker_roles_to_shared_config(
|
||||
def add_sharding_to_shared_config(
|
||||
shared_config: dict,
|
||||
worker_types_set: Set[str],
|
||||
worker_type: str,
|
||||
worker_name: str,
|
||||
worker_port: int,
|
||||
) -> None:
|
||||
"""Given a dictionary representing a config file shared across all workers,
|
||||
append appropriate worker information to it for the current worker_type instance.
|
||||
append sharded worker information to it for the current worker_type instance.
|
||||
|
||||
Args:
|
||||
shared_config: The config dict that all worker instances share (after being
|
||||
converted to YAML)
|
||||
worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
|
||||
This list can be a single worker type or multiple.
|
||||
shared_config: The config dict that all worker instances share (after being converted to YAML)
|
||||
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
|
||||
worker_name: The name of the worker instance.
|
||||
worker_port: The HTTP replication port that the worker instance is listening on.
|
||||
"""
|
||||
# The instance_map config field marks the workers that write to various replication
|
||||
# streams
|
||||
# The instance_map config field marks the workers that write to various replication streams
|
||||
instance_map = shared_config.setdefault("instance_map", {})
|
||||
|
||||
# This is a list of the stream_writers that there can be only one of. Events can be
|
||||
# sharded, and therefore doesn't belong here.
|
||||
singular_stream_writers = [
|
||||
"account_data",
|
||||
"presence",
|
||||
"receipts",
|
||||
"to_device",
|
||||
"typing",
|
||||
]
|
||||
|
||||
# Worker-type specific sharding config. Now a single worker can fulfill multiple
|
||||
# roles, check each.
|
||||
if "pusher" in worker_types_set:
|
||||
# Worker-type specific sharding config
|
||||
if worker_type == "pusher":
|
||||
shared_config.setdefault("pusher_instances", []).append(worker_name)
|
||||
|
||||
if "federation_sender" in worker_types_set:
|
||||
elif worker_type == "federation_sender":
|
||||
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
|
||||
|
||||
if "event_persister" in worker_types_set:
|
||||
elif worker_type == "event_persister":
|
||||
# Event persisters write to the events stream, so we need to update
|
||||
# the list of event stream writers
|
||||
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
|
||||
@@ -408,154 +285,9 @@ def add_worker_roles_to_shared_config(
|
||||
"port": worker_port,
|
||||
}
|
||||
|
||||
# Update the list of stream writers. It's convenient that the name of the worker
|
||||
# type is the same as the stream to write. Iterate over the whole list in case there
|
||||
# is more than one.
|
||||
for worker in worker_types_set:
|
||||
if worker in singular_stream_writers:
|
||||
shared_config.setdefault("stream_writers", {}).setdefault(
|
||||
worker, []
|
||||
).append(worker_name)
|
||||
|
||||
# Map of stream writer instance names to host/ports combos
|
||||
# For now, all stream writers need http replication ports
|
||||
instance_map[worker_name] = {
|
||||
"host": "localhost",
|
||||
"port": worker_port,
|
||||
}
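To illustrate the shape this function builds up, here is a sketch with a hypothetical worker name and port (not taken from the diff), showing what an event_persister registration adds to the shared config:

```python
# Sketch only: the kind of structure an event_persister registration produces.
shared_config: dict = {}
worker_name, worker_port = "event_persister1", 18009

# Event persisters write to the events stream...
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
    worker_name
)
# ...and every stream writer needs an instance_map entry for its replication port.
shared_config.setdefault("instance_map", {})[worker_name] = {
    "host": "localhost",
    "port": worker_port,
}
print(shared_config)
# {'stream_writers': {'events': ['event_persister1']},
#  'instance_map': {'event_persister1': {'host': 'localhost', 'port': 18009}}}
```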
def merge_worker_template_configs(
|
||||
existing_dict: Optional[Dict[str, Any]],
|
||||
to_be_merged_dict: Dict[str, Any],
|
||||
) -> Dict[str, Any]:
|
||||
"""When given an existing dict of worker template configuration consisting with both
|
||||
dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
|
||||
return new dict.
|
||||
|
||||
Args:
|
||||
existing_dict: Either an existing worker template or a fresh blank one.
|
||||
to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
|
||||
existing_dict.
|
||||
Returns: The newly merged together dict values.
|
||||
"""
|
||||
new_dict: Dict[str, Any] = {}
|
||||
if not existing_dict:
|
||||
# It doesn't exist yet, just use the new dict (but take a copy, not a reference)
|
||||
new_dict = to_be_merged_dict.copy()
|
||||
else:
|
||||
for i in to_be_merged_dict.keys():
|
||||
if (i == "endpoint_patterns") or (i == "listener_resources"):
|
||||
# merge the two lists, remove duplicates
|
||||
new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
|
||||
elif i == "shared_extra_conf":
|
||||
# merge dictionaries; the worker name will be replaced later
|
||||
new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
|
||||
elif i == "worker_extra_conf":
|
||||
# There is only one worker type that has a 'worker_extra_conf' and it is
|
||||
# the media_repo. Since duplicate worker types on the same worker don't
|
||||
# work, this is fine.
|
||||
new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
|
||||
else:
|
||||
# Everything else should be identical, like "app", which only works
|
||||
# because all apps are now generic_workers.
|
||||
new_dict[i] = to_be_merged_dict[i]
|
||||
return new_dict
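Assuming the function above is in scope, a small sketch of the merge behaviour with two cut-down templates (the patterns are taken from the tables earlier in this file; everything else is hypothetical):

```python
# Sketch only: merge two trimmed-down worker templates.
existing = {
    "app": "synapse.app.generic_worker",
    "listener_resources": ["client"],
    "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
    "shared_extra_conf": {},
    "worker_extra_conf": "",
}
incoming = {
    "app": "synapse.app.generic_worker",
    "listener_resources": ["client", "replication"],
    "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
    "shared_extra_conf": {},
    "worker_extra_conf": "",
}
merged = merge_worker_template_configs(existing, incoming)
print(sorted(merged["listener_resources"]))  # ['client', 'replication']
print(len(merged["endpoint_patterns"]))      # 2 (lists are deduplicated, not replaced)
```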
def insert_worker_name_for_worker_config(
|
||||
existing_dict: Dict[str, Any], worker_name: str
|
||||
) -> Dict[str, Any]:
|
||||
"""Insert a given worker name into the worker's configuration dict.
|
||||
|
||||
Args:
|
||||
existing_dict: The worker_config dict that is imported into shared_config.
|
||||
worker_name: The name of the worker to insert.
|
||||
Returns: Copy of the dict with newly inserted worker name
|
||||
"""
|
||||
dict_to_edit = existing_dict.copy()
|
||||
for k, v in dict_to_edit["shared_extra_conf"].items():
|
||||
# Only proceed if it's the placeholder name string
|
||||
if v == WORKER_PLACEHOLDER_NAME:
|
||||
dict_to_edit["shared_extra_conf"][k] = worker_name
|
||||
return dict_to_edit
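A sketch of the placeholder substitution, assuming `WORKER_PLACEHOLDER_NAME` and the function above are in scope; the worker name is hypothetical:

```python
# Sketch only: replace the placeholder with a concrete worker name.
template = {
    "shared_extra_conf": {"update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME},
}
result = insert_worker_name_for_worker_config(template, "user_dir1")
print(result["shared_extra_conf"])
# {'update_user_directory_from_worker': 'user_dir1'}
```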
def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
|
||||
"""
|
||||
Apply multiplier (if found) by returning a new expanded list with some basic error
|
||||
checking.
|
||||
|
||||
Args:
|
||||
worker_types: The unprocessed List of requested workers
|
||||
Returns:
|
||||
A new list with all requested workers expanded.
|
||||
"""
|
||||
# Checking performed:
|
||||
# 1. if worker:2 or more is declared, it will create additional workers up to number
|
||||
# 2. if worker:1, it will create a single copy of this worker as if no number was
|
||||
# given
|
||||
# 3. if worker:0 is declared, this worker will be ignored. This is to allow for
|
||||
# scripting and automated expansion and is intended behaviour.
|
||||
# 4. if worker:NaN or is a negative number, it will error and log it.
|
||||
new_worker_types = []
|
||||
for worker_type in worker_types:
|
||||
if ":" in worker_type:
|
||||
worker_type_components = split_and_strip_string(worker_type, ":", 1)
|
||||
worker_count = 0
|
||||
# Should only be 2 components, a type of worker(s) and an integer as a
|
||||
# string. Cast the number as an int then it can be used as a counter.
|
||||
try:
|
||||
worker_count = int(worker_type_components[1])
|
||||
except ValueError:
|
||||
error(
|
||||
f"Bad number in worker count for '{worker_type}': "
|
||||
f"'{worker_type_components[1]}' is not an integer"
|
||||
)
|
||||
|
||||
# As long as there are more than 0, we add one to the list to make below.
|
||||
for _ in range(worker_count):
|
||||
new_worker_types.append(worker_type_components[0])
|
||||
|
||||
else:
|
||||
# If it's not a real worker_type, it will error out later.
|
||||
new_worker_types.append(worker_type)
|
||||
return new_worker_types
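For example (a sketch assuming the function above is in scope; the worker types and counts are arbitrary):

```python
# Sketch only: expand requested multipliers per the rules in the comment above.
# "worker:0" entries are intentionally dropped, which is useful for scripting.
print(apply_requested_multiplier_for_worker(["synchrotron:2", "pusher", "federation_sender:0"]))
# ['synchrotron', 'synchrotron', 'pusher']
```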
def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
|
||||
"""Helper to check to make sure worker types that cannot have multiples do not.
|
||||
|
||||
Args:
|
||||
worker_type: The type of worker to check against.
|
||||
Returns: True if allowed, False if not
|
||||
"""
|
||||
return worker_type not in [
|
||||
"background_worker",
|
||||
"account_data",
|
||||
"presence",
|
||||
"receipts",
|
||||
"typing",
|
||||
"to_device",
|
||||
]
|
||||
|
||||
|
||||
def split_and_strip_string(
|
||||
given_string: str, split_char: str, max_split: SupportsIndex = -1
|
||||
) -> List[str]:
|
||||
"""
|
||||
Helper to split a string on split_char and strip whitespace from each end of each
|
||||
element.
|
||||
Args:
|
||||
given_string: The string to split
|
||||
split_char: The character to split the string on
|
||||
max_split: kwarg for split() to limit how many times the split() happens
|
||||
Returns:
|
||||
A List of strings
|
||||
"""
|
||||
# Removes whitespace from ends of result strings before adding to list. Allow for
|
||||
# overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
|
||||
return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
|
||||
elif worker_type == "media_repository":
|
||||
# The first configured media worker will run the media background jobs
|
||||
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
|
||||
|
||||
|
||||
def generate_base_homeserver_config() -> None:
|
||||
@@ -567,156 +299,32 @@ def generate_base_homeserver_config() -> None:
|
||||
# start.py already does this for us, so just call that.
|
||||
# note that this script is copied in in the official, monolith dockerfile
|
||||
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
|
||||
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
|
||||
|
||||
|
||||
def parse_worker_types(
|
||||
requested_worker_types: List[str],
|
||||
) -> Dict[str, Set[str]]:
|
||||
"""Read the desired list of requested workers and prepare the data for use in
|
||||
generating worker config files while also checking for potential gotchas.
|
||||
|
||||
Args:
|
||||
requested_worker_types: The list formed from the split environment variable
|
||||
containing the unprocessed requests for workers.
|
||||
|
||||
Returns: A dict of worker names to set of worker types. Format:
|
||||
{'worker_name':
|
||||
{'worker_type', 'worker_type2'}
|
||||
}
|
||||
"""
|
||||
# A counter of worker_base_name -> int. Used for determining the name for a given
|
||||
# worker when generating its config file, as each worker's name is just
|
||||
# worker_base_name followed by instance number
|
||||
worker_base_name_counter: Dict[str, int] = defaultdict(int)
|
||||
|
||||
# Similar to above, but more finely grained. This is used to make sure we don't have
# more than a single worker for cases where multiples would be bad (e.g. presence).
|
||||
worker_type_shard_counter: Dict[str, int] = defaultdict(int)
|
||||
|
||||
# The final result of all this processing
|
||||
dict_to_return: Dict[str, Set[str]] = {}
|
||||
|
||||
# Handle any multipliers requested for given workers.
|
||||
multiple_processed_worker_types = apply_requested_multiplier_for_worker(
|
||||
requested_worker_types
|
||||
)
|
||||
|
||||
# Process each worker_type_string
|
||||
# Examples of expected formats:
|
||||
# - requested_name=type1+type2+type3
|
||||
# - synchrotron
|
||||
# - event_creator+event_persister
|
||||
for worker_type_string in multiple_processed_worker_types:
|
||||
# First, if a name is requested, use that — otherwise generate one.
|
||||
worker_base_name: str = ""
|
||||
if "=" in worker_type_string:
|
||||
# Split on "=", remove extra whitespace from ends then make list
|
||||
worker_type_split = split_and_strip_string(worker_type_string, "=")
|
||||
if len(worker_type_split) > 2:
|
||||
error(
|
||||
"There should only be one '=' in the worker type string. "
|
||||
f"Please fix: {worker_type_string}"
|
||||
)
|
||||
|
||||
# Assign the name
|
||||
worker_base_name = worker_type_split[0]
|
||||
|
||||
if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
|
||||
# Apply a fairly narrow regex to the worker names. Some characters
|
||||
# aren't safe for use in file paths or nginx configurations.
|
||||
# Don't allow to end with a number because we'll add a number
|
||||
# ourselves in a moment.
|
||||
error(
|
||||
"Invalid worker name; please choose a name consisting of "
|
||||
"alphanumeric letters, _ + -, but not ending with a digit: "
|
||||
f"{worker_base_name!r}"
|
||||
)
|
||||
|
||||
# Continue processing the remainder of the worker_type string
|
||||
# with the name override removed.
|
||||
worker_type_string = worker_type_split[1]
|
||||
|
||||
# Split the worker_type_string on "+", remove whitespace from ends then make
|
||||
# the list a set so it's deduplicated.
|
||||
worker_types_set: Set[str] = set(
|
||||
split_and_strip_string(worker_type_string, "+")
|
||||
)
|
||||
|
||||
if not worker_base_name:
|
||||
# No base name specified: generate one deterministically from set of
|
||||
# types
|
||||
worker_base_name = "+".join(sorted(worker_types_set))
|
||||
|
||||
# At this point, we have:
|
||||
# worker_base_name which is the name for the worker, without counter.
|
||||
# worker_types_set which is the set of worker types for this worker.
|
||||
|
||||
# Validate worker_type and make sure we don't allow sharding for a worker type
|
||||
# that doesn't support it. Will error and stop if it is a problem,
|
||||
# e.g. 'background_worker'.
|
||||
for worker_type in worker_types_set:
|
||||
# Verify this is a real defined worker type. If it's not, stop everything so
|
||||
# it can be fixed.
|
||||
if worker_type not in WORKERS_CONFIG:
|
||||
error(
|
||||
f"{worker_type} is an unknown worker type! Was found in "
|
||||
f"'{worker_type_string}'. Please fix!"
|
||||
)
|
||||
|
||||
if worker_type in worker_type_shard_counter:
|
||||
if not is_sharding_allowed_for_worker_type(worker_type):
|
||||
error(
|
||||
f"There can be only a single worker with {worker_type} "
|
||||
"type. Please recount and remove."
|
||||
)
|
||||
# Not in shard counter, must not have seen it yet, add it.
|
||||
worker_type_shard_counter[worker_type] += 1
|
||||
|
||||
# Generate the number for the worker using incrementing counter
|
||||
worker_base_name_counter[worker_base_name] += 1
|
||||
worker_number = worker_base_name_counter[worker_base_name]
|
||||
worker_name = f"{worker_base_name}{worker_number}"
|
||||
|
||||
if worker_number > 1:
|
||||
# If this isn't the first worker, check that we don't have a confusing
|
||||
# mixture of worker types with the same base name.
|
||||
first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
|
||||
if first_worker_with_base_name != worker_types_set:
|
||||
error(
|
||||
f"Can not use worker_name: '{worker_name}' for worker_type(s): "
|
||||
f"{worker_types_set!r}. It is already in use by "
|
||||
f"worker_type(s): {first_worker_with_base_name!r}"
|
||||
)
|
||||
|
||||
dict_to_return[worker_name] = worker_types_set
|
||||
|
||||
return dict_to_return
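A sketch of the mapping this returns for a hypothetical SYNAPSE_WORKER_TYPES value of `stream_writers=account_data+presence,synchrotron:2`, assuming the function above is in scope (set ordering may vary):

```python
# Sketch only: the input list is what splitting the environment variable on ","
# would produce; the names and counts are hypothetical.
requested = parse_worker_types(["stream_writers=account_data+presence", "synchrotron:2"])
print(requested)
# {'stream_writers1': {'account_data', 'presence'},
#  'synchrotron1': {'synchrotron'},
#  'synchrotron2': {'synchrotron'}}
```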
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
|
||||
|
||||
|
||||
def generate_worker_files(
|
||||
environ: Mapping[str, str],
|
||||
config_path: str,
|
||||
data_dir: str,
|
||||
requested_worker_types: Dict[str, Set[str]],
|
||||
environ: Mapping[str, str], config_path: str, data_dir: str
|
||||
) -> None:
|
||||
"""Read the desired workers(if any) that is passed in and generate shared
|
||||
homeserver, nginx and supervisord configs.
|
||||
"""Read the desired list of workers from environment variables and generate
|
||||
shared homeserver, nginx and supervisord configs.
|
||||
|
||||
Args:
|
||||
environ: os.environ instance.
|
||||
config_path: The location of the generated Synapse main worker config file.
|
||||
data_dir: The location of the synapse data directory. Where log and
|
||||
user-facing config files live.
|
||||
requested_worker_types: A Dict containing requested workers in the format of
|
||||
{'worker_name1': {'worker_type', ...}}
|
||||
"""
|
||||
# Note that yaml cares about indentation, so care should be taken to insert lines
|
||||
# into files at the correct indentation below.
|
||||
|
||||
# First read the original config file and extract the listeners block. Then we'll
|
||||
# add another listener for replication. Later we'll write out the result to the
|
||||
# shared config file.
|
||||
# shared_config is the contents of a Synapse config file that will be shared amongst
|
||||
# the main Synapse process as well as all workers.
|
||||
# It is intended mainly for disabling functionality when certain workers are spun up,
|
||||
# and adding a replication listener.
|
||||
|
||||
# First read the original config file and extract the listeners block. Then we'll add
|
||||
# another listener for replication. Later we'll write out the result to the shared
|
||||
# config file.
|
||||
listeners = [
|
||||
{
|
||||
"port": 9093,
|
||||
@@ -732,9 +340,9 @@ def generate_worker_files(
|
||||
listeners += original_listeners
|
||||
|
||||
# The shared homeserver config. The contents of which will be inserted into the
|
||||
# base shared worker jinja2 template. This config file will be passed to all
|
||||
# workers, included Synapse's main process. It is intended mainly for disabling
|
||||
# functionality when certain workers are spun up, and adding a replication listener.
|
||||
# base shared worker jinja2 template.
|
||||
#
|
||||
# This config file will be passed to all workers, included Synapse's main process.
|
||||
shared_config: Dict[str, Any] = {"listeners": listeners}
|
||||
|
||||
# List of dicts that describe workers.
|
||||
@@ -742,20 +350,31 @@ def generate_worker_files(
|
||||
# program blocks.
|
||||
worker_descriptors: List[Dict[str, Any]] = []
|
||||
|
||||
# Upstreams for load-balancing purposes. This dict takes the form of the worker
|
||||
# type to the ports of each worker. For example:
|
||||
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
|
||||
# ports of each worker. For example:
|
||||
# {
|
||||
# worker_type: {1234, 1235, ...}}
|
||||
# }
|
||||
# and will be used to construct 'upstream' nginx directives.
|
||||
nginx_upstreams: Dict[str, Set[int]] = {}
|
||||
|
||||
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
|
||||
# will be placed after the proxy_pass directive. The main benefit to representing
|
||||
# this data as a dict over a str is that we can easily deduplicate endpoints
|
||||
# across multiple instances of the same worker. The final rendering will be combined
|
||||
# with nginx_upstreams and placed in /etc/nginx/conf.d.
|
||||
nginx_locations: Dict[str, str] = {}
|
||||
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
|
||||
# placed after the proxy_pass directive. The main benefit to representing this data as a
|
||||
# dict over a str is that we can easily deduplicate endpoints across multiple instances
|
||||
# of the same worker.
|
||||
#
|
||||
# An nginx site config that will be amended to depending on the workers that are
|
||||
# spun up. To be placed in /etc/nginx/conf.d.
|
||||
nginx_locations = {}
|
||||
|
||||
# Read the desired worker configuration from the environment
|
||||
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
|
||||
if not worker_types_env:
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
else:
|
||||
# Split type names by comma
|
||||
worker_types = worker_types_env.split(",")
|
||||
|
||||
# Create the worker configuration directory if it doesn't already exist
|
||||
os.makedirs("/conf/workers", exist_ok=True)
|
||||
@@ -763,57 +382,69 @@ def generate_worker_files(
|
||||
# Start worker ports from this arbitrary port
|
||||
worker_port = 18009
|
||||
|
||||
# A counter of worker_type -> int. Used for determining the name for a given
|
||||
# worker type when generating its config file, as each worker's name is just
|
||||
# worker_type + instance #
|
||||
worker_type_counter: Dict[str, int] = {}
|
||||
|
||||
# A list of internal endpoints to healthcheck, starting with the main process
|
||||
# which exists even if no workers do.
|
||||
healthcheck_urls = ["http://localhost:8080/health"]
|
||||
|
||||
# Get the set of all worker types that we have configured
|
||||
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
|
||||
# Map locations to upstreams (corresponding to worker types) in Nginx
|
||||
# but only if we use the appropriate worker type
|
||||
for worker_type in all_worker_types_in_use:
|
||||
for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
|
||||
nginx_locations[endpoint_pattern] = f"http://{worker_type}"
|
||||
# For each worker type specified by the user, create config values
|
||||
for worker_type in worker_types:
|
||||
worker_type = worker_type.strip()
|
||||
|
||||
# For each worker type specified by the user, create config values and write its
# yaml config file
|
||||
for worker_name, worker_types_set in requested_worker_types.items():
|
||||
# The collected and processed data will live here.
|
||||
worker_config: Dict[str, Any] = {}
|
||||
worker_config = WORKERS_CONFIG.get(worker_type)
|
||||
if worker_config:
|
||||
worker_config = worker_config.copy()
|
||||
else:
|
||||
log(worker_type + " is an unknown worker type! It will be ignored")
|
||||
continue
|
||||
|
||||
# Merge all worker config templates for this worker into a single config
|
||||
for worker_type in worker_types_set:
|
||||
copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
|
||||
|
||||
# Merge worker type template configuration data. It's a combination of lists
|
||||
# and dicts, so use this helper.
|
||||
worker_config = merge_worker_template_configs(
|
||||
worker_config, copy_of_template_config
|
||||
)
|
||||
|
||||
# Replace placeholder names in the config template with the actual worker name.
|
||||
worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
|
||||
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
|
||||
worker_type_counter[worker_type] = new_worker_count
|
||||
|
||||
# Name workers by their type concatenated with an incrementing number
|
||||
# e.g. federation_reader1
|
||||
worker_name = worker_type + str(new_worker_count)
|
||||
worker_config.update(
|
||||
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
|
||||
)
|
||||
|
||||
# Update the shared config with any worker_type specific options. The first of a
|
||||
# given worker_type needs to stay assigned and not be replaced.
|
||||
worker_config["shared_extra_conf"].update(shared_config)
|
||||
shared_config = worker_config["shared_extra_conf"]
|
||||
# Update the shared config with any worker-type specific options
|
||||
shared_config.update(worker_config["shared_extra_conf"])
|
||||
|
||||
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
|
||||
|
||||
# Update the shared config with sharding-related options if necessary
|
||||
add_worker_roles_to_shared_config(
|
||||
shared_config, worker_types_set, worker_name, worker_port
|
||||
)
|
||||
# Check if more than one instance of this worker type has been specified
|
||||
worker_type_total_count = worker_types.count(worker_type)
|
||||
if worker_type_total_count > 1:
|
||||
# Update the shared config with sharding-related options if necessary
|
||||
add_sharding_to_shared_config(
|
||||
shared_config, worker_type, worker_name, worker_port
|
||||
)
|
||||
|
||||
# Enable the worker in supervisord
|
||||
worker_descriptors.append(worker_config)
|
||||
|
||||
# Add nginx location blocks for this worker's endpoints (if any are defined)
|
||||
for pattern in worker_config["endpoint_patterns"]:
|
||||
# Determine whether we need to load-balance this worker
|
||||
if worker_type_total_count > 1:
|
||||
# Create or add to a load-balanced upstream for this worker
|
||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
||||
|
||||
# Upstreams are named after the worker_type
|
||||
upstream = "http://" + worker_type
|
||||
else:
|
||||
upstream = "http://localhost:%d" % (worker_port,)
|
||||
|
||||
# Note that this endpoint should proxy to this upstream
|
||||
nginx_locations[pattern] = upstream
|
||||
|
||||
# Write out the worker's logging config file
|
||||
|
||||
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
|
||||
|
||||
# Then a worker config file
|
||||
@@ -824,10 +455,6 @@ def generate_worker_files(
|
||||
worker_log_config_filepath=log_config_filepath,
|
||||
)
|
||||
|
||||
# Save this worker's port number to the correct nginx upstreams
|
||||
for worker_type in worker_types_set:
|
||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
||||
|
||||
worker_port += 1
|
||||
|
||||
# Build the nginx location config blocks
|
||||
@@ -840,14 +467,15 @@ def generate_worker_files(
|
||||
|
||||
# Determine the load-balancing upstreams to configure
|
||||
nginx_upstream_config = ""
|
||||
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
|
||||
|
||||
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
|
||||
body = ""
|
||||
for port in upstream_worker_ports:
|
||||
body += f" server localhost:{port};\n"
|
||||
body += " server localhost:%d;\n" % (port,)
|
||||
|
||||
# Add to the list of configured upstreams
|
||||
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
|
||||
upstream_worker_base_name=upstream_worker_base_name,
|
||||
upstream_worker_type=upstream_worker_type,
|
||||
body=body,
|
||||
)
|
||||
|
||||
@@ -868,7 +496,7 @@ def generate_worker_files(
|
||||
if reg_path.suffix.lower() in (".yaml", ".yml")
|
||||
]
|
||||
|
||||
workers_in_use = len(requested_worker_types) > 0
|
||||
workers_in_use = len(worker_types) > 0
|
||||
|
||||
# Shared homeserver config
|
||||
convert(
|
||||
@@ -964,53 +592,26 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
|
||||
if not os.path.exists(config_path):
|
||||
log("Generating base homeserver config")
|
||||
generate_base_homeserver_config()
|
||||
else:
|
||||
log("Base homeserver config exists—not regenerating")
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of
|
||||
# file). Don't re-configure workers in this instance.
|
||||
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of file).
|
||||
# Don't re-configure workers in this instance.
|
||||
mark_filepath = "/conf/workers_have_been_configured"
|
||||
if not os.path.exists(mark_filepath):
|
||||
# Collect and validate worker_type requests
|
||||
# Read the desired worker configuration from the environment
|
||||
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
|
||||
# Only process worker_types if they exist
|
||||
if not worker_types_env:
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
requested_worker_types: Dict[str, Any] = {}
|
||||
else:
|
||||
# Split type names by comma, ignoring whitespace.
|
||||
worker_types = split_and_strip_string(worker_types_env, ",")
|
||||
requested_worker_types = parse_worker_types(worker_types)
|
||||
|
||||
# Always regenerate all other config files
|
||||
log("Generating worker config files")
|
||||
generate_worker_files(environ, config_path, data_dir, requested_worker_types)
|
||||
generate_worker_files(environ, config_path, data_dir)
|
||||
|
||||
# Mark workers as being configured
|
||||
with open(mark_filepath, "w") as f:
|
||||
f.write("")
|
||||
else:
|
||||
log("Worker config exists—not regenerating")
|
||||
|
||||
# Lifted right out of start.py
|
||||
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
|
||||
|
||||
if os.path.isfile(jemallocpath):
|
||||
environ["LD_PRELOAD"] = jemallocpath
|
||||
else:
|
||||
log("Could not find %s, will not use" % (jemallocpath,))
|
||||
|
||||
# Start supervisord, which will start Synapse, all of the configured worker
|
||||
# processes, redis, nginx etc. according to the config we created above.
|
||||
log("Starting supervisord")
|
||||
flush_buffers()
|
||||
os.execle(
|
||||
os.execl(
|
||||
"/usr/local/bin/supervisord",
|
||||
"supervisord",
|
||||
"-c",
|
||||
"/etc/supervisor/supervisord.conf",
|
||||
environ,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
# This dockerfile builds an editable install of Synapse.
|
||||
#
|
||||
# Used by `complement.sh`. Not suitable for production use.
|
||||
|
||||
ARG PYTHON_VERSION=3.9
|
||||
|
||||
###
|
||||
### Stage 0: generate requirements.txt
|
||||
###
|
||||
# We hardcode the use of Debian bullseye here because this could change upstream
|
||||
# and other Dockerfiles used for testing are expecting bullseye.
|
||||
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
|
||||
|
||||
# Install Rust and other dependencies (stolen from normal Dockerfile)
|
||||
# install the OS build deps
|
||||
RUN \
|
||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update -qq && apt-get install -yqq \
|
||||
build-essential \
|
||||
libffi-dev \
|
||||
libjpeg-dev \
|
||||
libpq-dev \
|
||||
libssl-dev \
|
||||
libwebp-dev \
|
||||
libxml++2.6-dev \
|
||||
libxslt1-dev \
|
||||
openssl \
|
||||
zlib1g-dev \
|
||||
git \
|
||||
curl \
|
||||
gosu \
|
||||
libjpeg62-turbo \
|
||||
libpq5 \
|
||||
libwebp6 \
|
||||
xmlsec1 \
|
||||
libjemalloc2 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ENV RUSTUP_HOME=/rust
|
||||
ENV CARGO_HOME=/cargo
|
||||
ENV PATH=/cargo/bin:/rust/bin:$PATH
|
||||
RUN mkdir /rust /cargo
|
||||
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
|
||||
|
||||
|
||||
# Make a base copy of the editable source tree, so that we have something to
|
||||
# install and build now — even though it's going to be covered up by a mount
|
||||
# at runtime.
|
||||
COPY synapse /editable-src/synapse/
|
||||
COPY rust /editable-src/rust/
|
||||
# ... and what we need to `pip install`.
|
||||
COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/
|
||||
|
||||
RUN pip install poetry
|
||||
RUN poetry config virtualenvs.create false
|
||||
RUN cd /editable-src && poetry install --extras all
|
||||
|
||||
# Make copies of useful things for inspection:
|
||||
# - the Rust module (must be copied to the editable source tree before startup)
|
||||
# - poetry.lock is useful for checking if dependencies have changed.
|
||||
RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
|
||||
RUN cp /editable-src/poetry.lock /poetry.lock.bak
|
||||
|
||||
|
||||
### Extra setup from original Dockerfile
|
||||
COPY ./docker/start.py /start.py
|
||||
COPY ./docker/conf /conf
|
||||
|
||||
EXPOSE 8008/tcp 8009/tcp 8448/tcp
|
||||
|
||||
ENTRYPOINT ["/start.py"]
|
||||
|
||||
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
@@ -13,19 +13,14 @@ import jinja2
|
||||
|
||||
# Utility functions
|
||||
def log(txt: str) -> None:
|
||||
print(txt)
|
||||
print(txt, file=sys.stderr)
|
||||
|
||||
|
||||
def error(txt: str) -> NoReturn:
|
||||
print(txt, file=sys.stderr)
|
||||
log(txt)
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
def flush_buffers() -> None:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
|
||||
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
|
||||
"""Generate a file from a template
|
||||
|
||||
@@ -136,10 +131,10 @@ def generate_config_from_template(
|
||||
|
||||
if ownership is not None:
|
||||
log(f"Setting ownership on /data to {ownership}")
|
||||
subprocess.run(["chown", "-R", ownership, "/data"], check=True)
|
||||
subprocess.check_output(["chown", "-R", ownership, "/data"])
|
||||
args = ["gosu", ownership] + args
|
||||
|
||||
subprocess.run(args, check=True)
|
||||
subprocess.check_output(args)
|
||||
|
||||
|
||||
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
|
||||
@@ -163,7 +158,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
|
||||
if ownership is not None:
|
||||
# make sure that synapse has perms to write to the data dir.
|
||||
log(f"Setting ownership on {data_dir} to {ownership}")
|
||||
subprocess.run(["chown", ownership, data_dir], check=True)
|
||||
subprocess.check_output(["chown", ownership, data_dir])
|
||||
|
||||
# create a suitable log config from our template
|
||||
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
|
||||
@@ -190,7 +185,6 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
|
||||
"--open-private-ports",
|
||||
]
|
||||
# log("running %s" % (args, ))
|
||||
flush_buffers()
|
||||
os.execv(sys.executable, args)
|
||||
|
||||
|
||||
@@ -273,10 +267,8 @@ running with 'migrate_config'. See the README for more details.
|
||||
args = [sys.executable] + args
|
||||
if ownership is not None:
|
||||
args = ["gosu", ownership] + args
|
||||
flush_buffers()
|
||||
os.execve("/usr/sbin/gosu", args, environ)
|
||||
else:
|
||||
flush_buffers()
|
||||
os.execve(sys.executable, args, environ)
|
||||
|
||||
|
||||
|
||||
@@ -9,8 +9,6 @@
|
||||
- [Configuring a Reverse Proxy](reverse_proxy.md)
|
||||
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
|
||||
- [Configuring a Turn Server](turn-howto.md)
|
||||
- [coturn TURN server](setup/turn/coturn.md)
|
||||
- [eturnal TURN server](setup/turn/eturnal.md)
|
||||
- [Delegation](delegate.md)
|
||||
|
||||
# Upgrading
|
||||
@@ -97,7 +95,6 @@
|
||||
- [Log Contexts](log_contexts.md)
|
||||
- [Replication](replication.md)
|
||||
- [TCP Replication](tcp_replication.md)
|
||||
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
|
||||
- [Internal Documentation](development/internal_documentation/README.md)
|
||||
- [Single Sign-On]()
|
||||
- [SAML](development/saml.md)
|
||||
|
||||
@@ -5,7 +5,7 @@ use it, you must enable the account validity feature (under
|
||||
`account_validity`) in Synapse's configuration.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
## Renew account
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
This API returns information about reported events.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
The api is:
|
||||
```
|
||||
@@ -169,17 +169,3 @@ The following fields are returned in the JSON response body:
|
||||
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
|
||||
have a canonical alias set.
|
||||
* `event_json`: object - Details of the original event that was reported.
|
||||
|
||||
# Delete a specific event report
|
||||
|
||||
This API deletes a specific event report. If the request is successful, the response body
|
||||
will be an empty JSON object.
|
||||
|
||||
The api is:
|
||||
```
|
||||
DELETE /_synapse/admin/v1/event_reports/<report_id>
|
||||
```
|
||||
|
||||
**URL parameters:**
|
||||
|
||||
* `report_id`: string - The ID of the event report.
|
||||
|
||||
@@ -6,7 +6,7 @@ Details about the format of the `media_id` and storage of the media in the file
|
||||
are documented under [media repository](../media_repository.md).
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
## List all media in a room
|
||||
|
||||
@@ -235,14 +235,6 @@ The following fields are returned in the JSON response body:
|
||||
|
||||
Request:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/media/delete?before_ts=<before_ts>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
*Deprecated in Synapse v1.78.0:* This API is available at the deprecated endpoint:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
|
||||
|
||||
@@ -251,7 +243,7 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `server_name`: string - The name of your local server (e.g `matrix.org`). *Deprecated in Synapse v1.78.0.*
|
||||
* `server_name`: string - The name of your local server (e.g `matrix.org`).
|
||||
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
|
||||
Files that were last used before this timestamp will be deleted. It is the timestamp of
|
||||
last access, not the timestamp when the file was created.
|
||||
|
||||
@@ -11,7 +11,7 @@ Note that Synapse requires at least one message in each room, so it will never
|
||||
delete the last message in a room.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
The API is:
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ local users. The server administrator must be in the room and have permission to
|
||||
invite users.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
## Parameters
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ server. There are various parameters available that allow for filtering and
|
||||
sorting the returned list. This API supports pagination.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
**Parameters**
|
||||
|
||||
@@ -400,7 +400,7 @@ sent to a room in a given timeframe. There are various parameters available
|
||||
that allow for filtering and ordering the returned list. This API supports pagination.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ Returns information about all local media usage of users. Gives the
|
||||
possibility to filter them by time and user.
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
The API is:
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# User Admin API
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api).
|
||||
|
||||
## Query User Account
|
||||
|
||||
@@ -37,7 +37,6 @@ It returns a JSON body like the following:
|
||||
"is_guest": 0,
|
||||
"admin": 0,
|
||||
"deactivated": 0,
|
||||
"erased": false,
|
||||
"shadow_banned": 0,
|
||||
"creation_ts": 1560432506,
|
||||
"appservice_id": null,
|
||||
@@ -168,7 +167,6 @@ A response body like the following is returned:
|
||||
"admin": 0,
|
||||
"user_type": null,
|
||||
"deactivated": 0,
|
||||
"erased": false,
|
||||
"shadow_banned": 0,
|
||||
"displayname": "<User One>",
|
||||
"avatar_url": null,
|
||||
@@ -179,7 +177,6 @@ A response body like the following is returned:
|
||||
"admin": 1,
|
||||
"user_type": null,
|
||||
"deactivated": 0,
|
||||
"erased": false,
|
||||
"shadow_banned": 0,
|
||||
"displayname": "<User Two>",
|
||||
"avatar_url": "<avatar_url>",
|
||||
@@ -250,7 +247,6 @@ The following fields are returned in the JSON response body:
|
||||
- `user_type` - string - Type of the user. Normal users are type `None`.
|
||||
This allows user type specific behaviour. There are also types `support` and `bot`.
|
||||
- `deactivated` - bool - Status if that user has been marked as deactivated.
|
||||
- `erased` - bool - Status if that user has been marked as erased.
|
||||
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
|
||||
- `displayname` - string - The user's display name if they have set one.
|
||||
- `avatar_url` - string - The user's avatar URL if they have set one.
|
||||
@@ -1197,42 +1193,3 @@ Returns a `404` HTTP status code if no user was found, with a response body like
|
||||
```
|
||||
|
||||
_Added in Synapse 1.68.0._
|
||||
|
||||
|
||||
### Find a user based on their Third Party ID (ThreePID or 3PID)
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/threepid/$medium/users/$address
|
||||
```
|
||||
|
||||
When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
|
||||
|
||||
```json
|
||||
{
|
||||
"user_id": "@hello:example.org"
|
||||
}
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- `medium` - Kind of third-party ID, either `email` or `msisdn`.
|
||||
- `address` - Value of the third-party ID.
|
||||
|
||||
The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.
|
||||
|
||||
**Errors**
|
||||
|
||||
Returns a `404` HTTP status code if no user was found, with a response body like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"errcode":"M_NOT_FOUND",
|
||||
"error":"User not found"
|
||||
}
|
||||
```
|
||||
|
||||
_Added in Synapse 1.72.0._
|
||||
|
||||
@@ -15,7 +15,6 @@ app_service_config_files:
|
||||
The format of the AS configuration file is as follows:
|
||||
|
||||
```yaml
|
||||
id: <your-AS-id>
|
||||
url: <base url of AS>
|
||||
as_token: <token AS will add to requests to HS>
|
||||
hs_token: <token HS will add to requests to AS>
|
||||
|
||||
@@ -10,17 +10,26 @@ The necessary tools are:
|
||||
|
||||
- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
|
||||
- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
|
||||
- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and
|
||||
- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
|
||||
- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
|
||||
|
||||
See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions
|
||||
on how to install the above tools and run the linters.
|
||||
Install them with:
|
||||
|
||||
```sh
|
||||
pip install -e ".[lint,mypy]"
|
||||
```
|
||||
|
||||
The easiest way to run the lints is to invoke the linter script as follows.
|
||||
|
||||
```sh
|
||||
scripts-dev/lint.sh
|
||||
```
|
||||
|
||||
It's worth noting that modern IDEs and text editors can run these tools
|
||||
automatically on save. It may be worth looking into whether this
|
||||
functionality is supported in your editor for a more convenient
|
||||
development workflow. It is not, however, recommended to run `mypy`
|
||||
on save as it takes a while and can be very resource intensive.
|
||||
development workflow. It is not, however, recommended to run `flake8` or `mypy`
|
||||
on save as they take a while and can be very resource intensive.
|
||||
|
||||
## General rules
|
||||
|
||||
|
||||
@@ -73,15 +73,6 @@ It is also possible to do delegation using a SRV DNS record. However, that is ge
|
||||
not recommended, as it can be difficult to configure the TLS certificates correctly in
|
||||
this case, and it offers little advantage over `.well-known` delegation.
|
||||
|
||||
Please keep in mind that server delegation is a function of server-server communication,
|
||||
and as such using SRV DNS records will not cover use cases involving client-server comms.
|
||||
This means setting global client settings (such as a Jitsi endpoint, or disabling
|
||||
creating new rooms as encrypted by default, etc) will still require that you serve a file
|
||||
from the `https://<server_name>/.well-known/` endpoints defined in the spec! If you are
|
||||
considering using SRV DNS delegation to avoid serving files from this endpoint, consider
|
||||
the impact that you will not be able to change those client-based default values globally,
|
||||
and will be relegated to the featureset of the configuration of each individual client.
|
||||
|
||||
However, if you really need it, you can find some documentation on what such a
|
||||
record should look like and how Synapse will use it in [the Matrix
|
||||
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).
|
||||
|
||||
@@ -24,8 +24,6 @@ The code of Synapse is written in Python 3. To do pretty much anything, you'll n
|
||||
|
||||
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
|
||||
|
||||
Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
|
||||
|
||||
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
|
||||
|
||||
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
|
||||
@@ -67,7 +65,7 @@ pipx install poetry
|
||||
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
|
||||
for other installation methods.
|
||||
|
||||
Developing Synapse requires Poetry version 1.3.2 or later.
|
||||
Synapse requires Poetry version 1.2.0 or later.
|
||||
|
||||
Next, open a terminal and install dependencies as follows:
|
||||
|
||||
@@ -78,19 +76,6 @@ poetry install --extras all
|
||||
|
||||
This will install the runtime and developer dependencies for the project.
|
||||
|
||||
## Running Synapse via poetry
|
||||
|
||||
To start a local instance of Synapse in the locked poetry environment, create a config file:
|
||||
|
||||
```sh
|
||||
cp docs/sample_config.yaml homeserver.yaml
|
||||
```
|
||||
|
||||
Now edit homeserver.yaml, and run Synapse with:
|
||||
|
||||
```sh
|
||||
poetry run python -m synapse.app.homeserver -c homeserver.yaml
|
||||
```
|
||||
|
||||
# 5. Get in touch.
|
||||
|
||||
@@ -119,8 +104,8 @@ regarding Synapse's Admin API, which is used mostly by sysadmins and external
|
||||
service developers.
|
||||
|
||||
Synapse's code style is documented [here](../code_style.md). Please follow
|
||||
it, including the conventions for [configuration
|
||||
options and documentation](../code_style.md#configuration-code-and-documentation-format).
|
||||
it, including the conventions for the [sample configuration
|
||||
file](../code_style.md#configuration-file-format).
|
||||
|
||||
We welcome improvements and additions to our documentation itself! When
|
||||
writing new pages, please
|
||||
@@ -139,7 +124,7 @@ changes to the Rust code.
|
||||
|
||||
|
||||
# 8. Test, test, test!
|
||||
<a name="test-test-test" id="test-test-test"></a>
|
||||
<a name="test-test-test"></a>
|
||||
|
||||
While you're developing and before submitting a patch, you'll
|
||||
want to test your code.
|
||||
@@ -339,13 +324,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
|
||||
|
||||
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
|
||||
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
|
||||
- If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
|
||||
types you wish to test. A simple comma-delimited string containing the worker types
|
||||
defined from the `WORKERS_CONFIG` template in
|
||||
[here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
|
||||
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
|
||||
See the [worker documentation](../workers.md) for additional information on workers.
|
||||
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
|
||||
|
||||
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
|
||||
```sh
|
||||
@@ -355,7 +333,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
|
||||
### Prettier formatting with `gotestfmt`
|
||||
|
||||
If you want to format the output of the tests the same way as it looks in CI,
|
||||
install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
|
||||
install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
|
||||
|
||||
You can then use this incantation to format the tests appropriately:
|
||||
|
||||
@@ -396,7 +374,7 @@ To prepare a Pull Request, please:
|
||||
## Changelog
|
||||
|
||||
All changes, even minor ones, need a corresponding changelog / newsfragment
|
||||
entry. These are managed by [Towncrier](https://github.com/twisted/towncrier).
|
||||
entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).
|
||||
|
||||
To create a changelog entry, make a new file in the `changelog.d` directory named
|
||||
in the format of `PRnumber.type`. The type can be one of the following:
|
||||
@@ -412,7 +390,7 @@ This file will become part of our [changelog](
|
||||
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
|
||||
release, so the content of the file should be a short description of your
|
||||
change in the same style as the rest of the changelog. The file can contain Markdown
|
||||
formatting, and must end with a full stop (.) or an exclamation mark (!) for
|
||||
formatting, and should end with a full stop (.) or an exclamation mark (!) for
|
||||
consistency.
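
For instance, a minimal sketch of creating such an entry from the repository root (the PR number `1234` and the `bugfix` type are purely illustrative):

```python
# Illustrative only: write a newsfragment for a hypothetical PR number 1234
# of type "bugfix". The description is short, may contain Markdown, and ends
# with a full stop.
from pathlib import Path

entry = Path("changelog.d") / "1234.bugfix"
entry.write_text("Fix a bug in the frobnicator.\n")
```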
|
||||
|
||||
Adding credits to the changelog is encouraged, we value your
|
||||
@@ -438,7 +416,8 @@ chicken-and-egg problem.
|
||||
There are two options for solving this:
|
||||
|
||||
1. Open the PR without a changelog file, see what number you got, and *then*
|
||||
add the changelog file to your branch, or:
|
||||
add the changelog file to your branch (see [Updating your pull
|
||||
request](#updating-your-pull-request)), or:
|
||||
|
||||
1. Look at the [list of all
|
||||
issues/PRs](https://github.com/matrix-org/synapse/issues?q=), add one to the
|
||||
|
||||
@@ -2,13 +2,6 @@
|
||||
|
||||
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
|
||||
|
||||
# Installing
|
||||
|
||||
See the [contributing guide](contributing_guide.md#4-install-the-dependencies).
|
||||
|
||||
Developers should use Poetry 1.3.2 or higher. If you encounter problems related
|
||||
to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version).
|
||||
|
||||
# Background
|
||||
|
||||
Synapse uses a variety of third-party Python packages to function as a homeserver.
|
||||
@@ -130,7 +123,7 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
|
||||
## ...reset my venv to the locked environment?
|
||||
|
||||
```shell
|
||||
poetry install --all-extras --sync
|
||||
poetry install --extras all --remove-untracked
|
||||
```
|
||||
|
||||
## ...delete everything and start over from scratch?
|
||||
@@ -190,6 +183,7 @@ Either:
|
||||
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
|
||||
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
|
||||
`--extras` and `--optional` flags in particular.
|
||||
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
|
||||
|
||||
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
|
||||
|
||||
@@ -202,7 +196,7 @@ poetry remove packagename
|
||||
```
|
||||
|
||||
ought to do the trick. Alternatively, manually update `pyproject.toml` and
|
||||
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
|
||||
`poetry lock --no-update`. Include the updated `pyproject.toml` and poetry.lock`
|
||||
files in your commit.
|
||||
|
||||
## ...update the version range for an existing dependency?
|
||||
@@ -246,6 +240,9 @@ poetry export --extras all
|
||||
|
||||
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
|
||||
|
||||
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
|
||||
be required.
|
||||
|
||||
## ...build a test wheel?
|
||||
|
||||
I usually use
|
||||
@@ -258,26 +255,12 @@ because [`build`](https://github.com/pypa/build) is a standardish tool which
|
||||
doesn't require poetry. (It's what we use in CI too). However, you could try
|
||||
`poetry build` too.
|
||||
|
||||
## ...handle a Dependabot pull request?
|
||||
|
||||
Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
|
||||
creates a pull request a GitHub Action will run to automatically create a changelog
|
||||
file. Ensure that:
|
||||
|
||||
* the lockfile changes look reasonable;
|
||||
* the upstream changelog file (linked in the description) doesn't include any
|
||||
breaking changes;
|
||||
* continuous integration passes (due to permissions, the GitHub Actions run on
|
||||
the changelog commit will fail, look at the initial commit of the pull request);
|
||||
|
||||
In particular, any updates to the type hints (usually packages which start with `types-`)
|
||||
should be safe to merge if linting passes.
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
## Check the version of poetry with `poetry --version`.
|
||||
|
||||
The minimum version of poetry supported by Synapse is 1.3.2.
|
||||
The minimum version of poetry supported by Synapse is 1.2.
|
||||
|
||||
It can also be useful to check the version of `poetry-core` in use. If you've
|
||||
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
|
||||
|
||||
@@ -1,375 +0,0 @@
|
||||
# How do faster joins work?
|
||||
|
||||
This is a work-in-progress set of notes with two goals:
|
||||
- act as a reference, explaining how Synapse implements faster joins; and
|
||||
- record the rationale behind our choices.
|
||||
|
||||
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
|
||||
|
||||
The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
|
||||
request a lightweight response to the federation `/send_join` endpoint.
|
||||
This is called a **faster join**, also known as a **partial join**. In these
|
||||
notes we'll usually use the word "partial" as it matches the database schema.
|
||||
|
||||
## Overview: processing events in a partially-joined room
|
||||
|
||||
The response to a partial join consists of
|
||||
- the requested join event `J`,
|
||||
- a list of the servers in the room (according to the state before `J`),
|
||||
- a subset of the state of the room before `J`,
|
||||
- the full auth chain of that state subset.
|
||||
|
||||
Synapse marks the room as partially joined by adding a row to the database table
|
||||
`partial_state_rooms`. It also marks the join event `J` as "partially stated",
|
||||
meaning that we have neither received nor computed the full state before/after
|
||||
`J`. This is done by adding a row to `partial_state_events`.
|
||||
|
||||
<details><summary>DB schema</summary>
|
||||
|
||||
```
|
||||
matrix=> \d partial_state_events
|
||||
Table "matrix.partial_state_events"
|
||||
Column │ Type │ Collation │ Nullable │ Default
|
||||
══════════╪══════╪═══════════╪══════════╪═════════
|
||||
room_id │ text │ │ not null │
|
||||
event_id │ text │ │ not null │
|
||||
|
||||
matrix=> \d partial_state_rooms
|
||||
Table "matrix.partial_state_rooms"
|
||||
Column │ Type │ Collation │ Nullable │ Default
|
||||
════════════════════════╪════════╪═══════════╪══════════╪═════════
|
||||
room_id │ text │ │ not null │
|
||||
device_lists_stream_id │ bigint │ │ not null │ 0
|
||||
join_event_id │ text │ │ │
|
||||
joined_via │ text │ │ │
|
||||
|
||||
matrix=> \d partial_state_rooms_servers
|
||||
Table "matrix.partial_state_rooms_servers"
|
||||
Column │ Type │ Collation │ Nullable │ Default
|
||||
═════════════╪══════╪═══════════╪══════════╪═════════
|
||||
room_id │ text │ │ not null │
|
||||
server_name │ text │ │ not null │
|
||||
```
|
||||
|
||||
Indices, foreign-keys and check constraints are omitted for brevity.
|
||||
</details>
|
||||
|
||||
While partially joined to a room, Synapse receives events `E` from remote
|
||||
homeservers as normal, and can create events at the request of its local users.
|
||||
However, we run into trouble when we enforce the [checks on an event].
|
||||
|
||||
> 1. Is a valid event, otherwise it is dropped. For an event to be valid, it
|
||||
> must contain a room_id, and it must comply with the event format of that
|
||||
> room version.
|
||||
> 2. Passes signature checks, otherwise it is dropped.
|
||||
> 3. Passes hash checks, otherwise it is redacted before being processed further.
|
||||
> 4. Passes authorization rules based on the event’s auth events, otherwise it
|
||||
> is rejected.
|
||||
> 5. **Passes authorization rules based on the state before the event, otherwise
|
||||
> it is rejected.**
|
||||
> 6. **Passes authorization rules based on the current state of the room,
|
||||
> otherwise it is “soft failed”.**
|
||||
|
||||
[checks on an event]: https://spec.matrix.org/v1.5/server-server-api/#checks-performed-on-receipt-of-a-pdu
|
||||
|
||||
We can enforce checks 1--4 without any problems.
|
||||
But we cannot enforce checks 5 or 6 with complete certainty, since Synapse does
|
||||
not know the full state before `E`, nor that of the room.
|
||||
|
||||
### Partial state
|
||||
|
||||
Instead, we make a best-effort approximation.
|
||||
While the room is considered partially joined, Synapse tracks the "partial
|
||||
state" before events.
|
||||
This works in a similar way as regular state:
|
||||
|
||||
- The partial state before `J` is that given to us by the partial join response.
|
||||
- The partial state before an event `E` is the resolution of the partial states
|
||||
after each of `E`'s `prev_event`s.
|
||||
- If `E` is rejected or a message event, the partial state after `E` is the
|
||||
partial state before `E`.
|
||||
- Otherwise, the partial state after `E` is the partial state before `E`, plus
|
||||
`E` itself.
|
||||
|
||||
More concisely, partial state propagates just like full state; the only
|
||||
difference is that we "seed" it with an incomplete initial state.
|
||||
Synapse records that we have only calculated partial state for this event with
|
||||
a row in `partial_state_events`.
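
To make those propagation rules concrete, here is a rough, illustrative Python sketch. The `Event` class and the `resolve` stand-in are simplifications invented for this example; they are not Synapse's real types, and real state resolution is far more involved.

```python
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event type, state key) -> event ID


@dataclass
class Event:
    event_id: str
    type: str
    state_key: Optional[str]  # None for message (non-state) events
    rejected: bool = False


def resolve(state_maps: List[StateMap]) -> StateMap:
    """Stand-in for state resolution: naively merge the maps.

    Real state resolution is considerably more subtle; this only keeps the
    sketch self-contained.
    """
    merged: StateMap = {}
    for state in state_maps:
        merged.update(state)
    return merged


def partial_state_before(states_after_prev_events: List[StateMap]) -> StateMap:
    # The partial state before E is the resolution of the partial states
    # after each of E's prev_events.
    return resolve(states_after_prev_events)


def partial_state_after(event: Event, state_before: StateMap) -> StateMap:
    # Rejected events and message events leave the state unchanged...
    if event.rejected or event.state_key is None:
        return state_before
    # ...otherwise the event itself is added to the state.
    return {**state_before, (event.type, event.state_key): event.event_id}
```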
|
||||
|
||||
While the room remains partially stated, check 5 on incoming events to that
|
||||
room becomes:
|
||||
|
||||
> 5. Passes authorization rules based on **the resolution between the partial
|
||||
> state before `E` and `E`'s auth events.** If the event fails to pass
|
||||
> authorization rules, it is rejected.
|
||||
|
||||
Additionally, check 6 is deleted: no soft-failures are enforced.
|
||||
|
||||
While partially joined, the current partial state of the room is defined as the
|
||||
resolution across the partial states after all forward extremities in the room.
|
||||
|
||||
_Remark._ Events with partial state are _not_ considered
|
||||
[outliers](../room-dag-concepts.md#outliers).
|
||||
|
||||
### Approximation error
|
||||
|
||||
Using partial state means the auth checks can fail in a few different ways[^2].
|
||||
|
||||
[^2]: Is this exhaustive?
|
||||
|
||||
- We may erroneously accept an incoming event in check 5 based on partial state
|
||||
when it would have been rejected based on full state, or vice versa.
|
||||
- This means that an event could erroneously be added to the current partial
|
||||
state of the room when it would not be present in the full state of the room,
|
||||
or vice versa.
|
||||
- Additionally, we may have skipped soft-failing an event that would have been
|
||||
soft-failed based on full state.
|
||||
|
||||
(Note that the discrepancies described in the last two bullets are user-visible.)
|
||||
|
||||
This means that we have to be very careful when we want to lookup pieces of room
|
||||
state in a partially-joined room. Our approximation of the state may be
|
||||
incorrect or missing. But we can make some educated guesses. If
|
||||
|
||||
- our partial state is likely to be correct, or
|
||||
- the consequences of our partial state being incorrect are minor,
|
||||
|
||||
then we proceed as normal, and let the resync process fix up any mistakes (see
|
||||
below).
|
||||
|
||||
When is our partial state likely to be correct?
|
||||
|
||||
- It's more accurate the closer we are to the partial join event. (So we should
|
||||
ideally complete the resync as soon as possible.)
|
||||
- Non-member events: we will have received them as part of the partial join
|
||||
response, if they were part of the room state at that point. We may
|
||||
incorrectly accept or reject updates to that state (at first because we lack
|
||||
remote membership information; later because of compounding errors), so these
|
||||
can become incorrect over time.
|
||||
- Local members' memberships: we are the only ones who can create join and
|
||||
knock events for our users. We can't be completely confident in the
|
||||
correctness of bans, invites and kicks from other homeservers, but the resync
|
||||
process should correct any mistakes.
|
||||
- Remote members' memberships: we did not receive these in the /send_join
|
||||
response, so we have essentially no idea if these are correct or not.
|
||||
|
||||
In short, we deem it acceptable to trust the partial state for non-membership
|
||||
and local membership events. For remote membership events, we wait for the
|
||||
resync to complete, at which point we have the full state of the room and can
|
||||
proceed as normal.
|
||||
|
||||
### Fixing the approximation with a resync
|
||||
|
||||
The partial-state approximation is only a temporary affair. In the background,
|
||||
Synapse begins a "resync" process. This is a continuous loop, starting at the
|
||||
partial join event and proceeding downwards through the event graph. For each
|
||||
`E` seen in the room since partial join, Synapse will fetch
|
||||
|
||||
- the event ids in the state of the room before `E`, via
|
||||
[`/state_ids`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1state_idsroomid);
|
||||
- the event ids in the full auth chain of `E`, included in the `/state_ids`
|
||||
response; and
|
||||
- any events from the previous two bullets that Synapse hasn't persisted, via
|
||||
[`/state`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1stateroomid).
|
||||
|
||||
This means Synapse has (or can compute) the full state before `E`, which allows
|
||||
Synapse to properly authorise or reject `E`. At this point, the event
|
||||
is considered to have "full state" rather than "partial state". We record this
|
||||
by removing `E` from the `partial_state_events` table.
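
The loop can be summarised with the following illustrative sketch. The stub federation client and the in-memory "tables" are hypothetical stand-ins for this example only, not Synapse's actual implementation.

```python
from typing import List, Set, Tuple


class StubFederationClient:
    """Pretend federation client; in reality these would be HTTP calls."""

    def state_ids(self, room_id: str, event_id: str) -> Tuple[List[str], List[str]]:
        # Would call the federation /state_ids endpoint to learn the event IDs
        # of the state before the event and of its full auth chain.
        return ([], [])

    def fetch_events(self, room_id: str, event_ids: List[str]) -> None:
        # Would fetch and persist any of those events we don't yet have.
        pass


def resync_partial_state_room(
    room_id: str,
    events_since_join: List[str],
    partial_state_events: Set[str],
    partial_state_rooms: Set[str],
    federation: StubFederationClient,
) -> None:
    for event_id in events_since_join:
        # 1. Learn which events make up the state and auth chain before E.
        state_ids, auth_chain_ids = federation.state_ids(room_id, event_id)

        # 2. Fetch any of them that we haven't already persisted.
        federation.fetch_events(room_id, state_ids + auth_chain_ids)

        # 3. We can now compute the full state before E and re-run the auth
        #    checks (omitted here). E is now fully stated, so remove it from
        #    partial_state_events.
        partial_state_events.discard(event_id)

    # Every event since the join is fully stated: the room is no longer
    # partially joined, so remove it from partial_state_rooms.
    partial_state_rooms.discard(room_id)
```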
|
||||
|
||||
\[**TODO:** Does Synapse persist a new state group for the full state
|
||||
before `E`, or do we alter the (partial-)state group in-place? Are state groups
|
||||
ever marked as partially-stated? \]
|
||||
|
||||
This scheme means it is possible for us to have accepted and sent an event to
|
||||
clients, only to reject it during the resync. From a client's perspective, the
|
||||
effect is similar to a retroactive
|
||||
state change due to state resolution---i.e. a "state reset".[^3]
|
||||
|
||||
[^3]: Clients should refresh caches to detect such a change. Rumour has it that
|
||||
sliding sync will fix this.
|
||||
|
||||
When all events since the join `J` have been fully-stated, the room resync
|
||||
process is complete. We record this by removing the room from
|
||||
`partial_state_rooms`.
|
||||
|
||||
## Faster joins on workers
|
||||
|
||||
For the time being, the resync process happens on the master worker.
|
||||
A new replication stream `un_partial_stated_room` is added. Whenever a resync
|
||||
completes and a partial-state room becomes fully stated, a new message is sent
|
||||
into that stream containing the room ID.
|
||||
|
||||
## Notes on specific cases
|
||||
|
||||
> **NB.** The notes below are rough. Some of them are hidden under `<details>`
|
||||
disclosures because they have yet to be implemented in mainline Synapse.
|
||||
|
||||
### Creating events during a partial join
|
||||
|
||||
When sending out messages during a partial join, we assume our partial state is
|
||||
accurate and proceed as normal. For this to have any hope of succeeding at all,
|
||||
our partial state must contain an entry for each of the (type, state key) pairs
|
||||
[specified by the auth rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules):
|
||||
|
||||
- `m.room.create`
|
||||
- `m.room.join_rules`
|
||||
- `m.room.power_levels`
|
||||
- `m.room.third_party_invite`
|
||||
- `m.room.member`
|
||||
|
||||
The first four of these should be present in the state before `J` that is given
|
||||
to us in the partial join response; only membership events are omitted. In order
|
||||
for us to consider the user joined, we must have their membership event. That
|
||||
means the only possible omission is the target's membership in an invite, kick
|
||||
or ban.
|
||||
|
||||
The worst possibility is that we locally invite someone who is banned according to
|
||||
the full state, because we lack their ban in our current partial state. The rest
|
||||
of the federation---at least, those who are fully joined---should correctly
|
||||
enforce the [membership transition constraints](
|
||||
https://spec.matrix.org/v1.3/client-server-api/#room-membership
|
||||
). So the erroneous invite should be ignored by fully-joined
|
||||
homeservers and resolved by the resync for partially-joined homeservers.
|
||||
|
||||
|
||||
|
||||
In more generality, there are two problems we're worrying about here:
|
||||
|
||||
- We might create an event that is valid under our partial state, only to later
|
||||
find out that is actually invalid according to the full state.
|
||||
- Or: we might refuse to create an event that is invalid under our partial
|
||||
state, even though it would be perfectly valid under the full state.
|
||||
|
||||
However, we expect such problems to be unlikely in practice, because
|
||||
|
||||
- We trust that the room has sensible power levels, e.g. that bad actors with
|
||||
high power levels are demoted before their ban.
|
||||
- We trust that the resident server provides us up-to-date power levels, join
|
||||
rules, etc.
|
||||
- State changes in rooms are relatively infrequent, and the resync period is
|
||||
relatively quick.
|
||||
|
||||
#### Sending out the event over federation
|
||||
|
||||
**TODO:** needs prose fleshing out.
|
||||
|
||||
Normally: send out in a fed txn to all HSes in the room.
|
||||
We only know that some HSes were in the room at some point. Wat do.
|
||||
Send it out to the list of servers from the first join.
|
||||
**TODO** what do we do here if we have full state?
|
||||
If the prev event was created by us, we can risk sending it to the wrong HS. (Motivation: privacy concern of the content. Not such a big deal for a public room or an encrypted room. But non-encrypted invite-only...)
|
||||
But don't want to send out sensitive data in other HS's events in this way.
|
||||
|
||||
Suppose we discover after resync that we shouldn't have sent out one of our events (not a prev_event) to a target HS. Not much we can do.
|
||||
What about if we didn't send them an event but should've?
|
||||
E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them.
|
||||
Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left.
|
||||
Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?)
|
||||
- Don't do this currently.
|
||||
- If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages
|
||||
- Gap: rooms which are infrequently used and take a long time to resync.
|
||||
|
||||
### Joining after a partial join
|
||||
|
||||
**NB.** Not yet implemented.
|
||||
|
||||
<details>
|
||||
|
||||
**TODO:** needs prose fleshing out. Liaise with Matthieu. Explain why /send_join
|
||||
(Rich was surprised we didn't just create it locally. Answer: to try and avoid
|
||||
a join which then gets rejected after resync.)
|
||||
|
||||
We don't know for sure that any join we create would be accepted.
|
||||
E.g. the joined user might have been banned; the join rules might have changed in a way that we didn't realise... some way in which the partial state was mistaken.
|
||||
Instead, do another partial make-join/send-join handshake to confirm that the join works.
|
||||
- Probably going to get a bunch of duplicate state events and auth events.... but the point of partial joins is that these should be small. Many are already persisted = good.
|
||||
- What if the second send_join response includes a different list of resident HSes? Could ignore it.
|
||||
- Could even have a special flag that says "just make me a join", i.e. don't bother giving me state or servers in room. Deffo want the auth chain tho.
|
||||
- SQ: wrt device lists it's a lot safer to ignore it!!!!!
|
||||
- What if the state at the second join is inconsistent with what we have? Ignore it?
|
||||
|
||||
</details>
|
||||
|
||||
### Leaving (and kicks and bans) after a partial join
|
||||
|
||||
**NB.** Not yet implemented.
|
||||
|
||||
<details>
|
||||
|
||||
When you're fully joined to a room, to have a user `U` leave it, their homeserver
|
||||
needs to
|
||||
|
||||
- create a new leave event for `U` which will be accepted by other homeservers,
|
||||
and
|
||||
- send that event out to the homeservers in the federation.
|
||||
|
||||
When is a leave event accepted? See
|
||||
[v10 auth rules](https://spec.matrix.org/v1.5/rooms/v10/#authorization-rules):
|
||||
|
||||
> 4. If type is m.room.member: [...]
|
||||
>
|
||||
> 5. If membership is leave:
|
||||
>
|
||||
> 1. If the sender matches state_key, allow if and only if that user’s current membership state is invite, join, or knock.
|
||||
> 2. [...]
|
||||
|
||||
I think this means that (well-formed!) self-leaves are governed entirely by
|
||||
4.5.1. This means that if we correctly calculate state which says that `U` is
|
||||
invited, joined or knocked and include it in the leave's auth events, our event
|
||||
is accepted by checks 4 and 5 on incoming events.
|
||||
|
||||
> 4. Passes authorization rules based on the event’s auth events, otherwise
|
||||
> it is rejected.
|
||||
> 5. Passes authorization rules based on the state before the event, otherwise
|
||||
> it is rejected.
|
||||
|
||||
The only way to fail check 6 is if the receiving server's current state of the
|
||||
room says that `U` is banned, has left, or has no membership event. But this is
|
||||
fine: the receiving server already thinks that `U` isn't in the room.
|
||||
|
||||
> 6. Passes authorization rules based on the current state of the room,
|
||||
> otherwise it is “soft failed”.
|
||||
|
||||
For the second point (publishing the leave event), the best thing we can do is
|
||||
to publish to all HSes we know to be currently in the room. If they miss that
|
||||
event, they might send us traffic in the room that we don't care about. This is
|
||||
a problem with leaving after a "full" join; we don't seek to fix this with
|
||||
partial joins.
|
||||
|
||||
(With that said: there's nothing machine-readable in the /send response. I don't
|
||||
think we can deduce "destination has left the room" from a failure to /send an
|
||||
event into that room?)
|
||||
|
||||
#### Can we still do this during a partial join?
|
||||
|
||||
We can create leave events and can choose what gets included in our auth events,
|
||||
so we can be sure that we pass check 4 on incoming events. For check 5, we might
|
||||
have an incorrect view of the state before an event.
|
||||
The only way we might erroneously think a leave is valid is if
|
||||
|
||||
- the partial state before the leave has `U` joined, invited or knocked, but
|
||||
- the full state before the leave has `U` banned, left or not present,
|
||||
|
||||
in which case the leave doesn't make anything worse: other HSes already consider
|
||||
us as not in the room, and will continue to do so after seeing the leave.
|
||||
|
||||
The remaining obstacle is then: can we safely broadcast the leave event? We may
|
||||
miss servers or incorrectly think that a server is in the room. Or the
|
||||
destination server may be offline and miss the transaction containing our leave
|
||||
event. This should self-heal when they see an event whose `prev_events` descends
|
||||
from our leave.
|
||||
|
||||
Another option we considered was to use federation `/send_leave` to ask a
|
||||
fully-joined server to send out the event on our behalf. But that introduces
|
||||
complexity without much benefit. Besides, as Rich put it,
|
||||
|
||||
> sending out leaves is pretty best-effort currently
|
||||
|
||||
so this is probably good enough as-is.
|
||||
|
||||
#### Cleanup after the last leave
|
||||
|
||||
**TODO**: what cleanup is necessary? Is it all just nice-to-have to save unused
|
||||
work?
|
||||
</details>
|
||||
@@ -16,21 +16,14 @@
|
||||
There are two methods of enabling the metrics endpoint in Synapse.
|
||||
|
||||
The first serves the metrics as a part of the usual web server and
|
||||
can be enabled by adding the `metrics` resource to the existing
|
||||
listener, as in this example:
|
||||
can be enabled by adding the \"metrics\" resource to the existing
|
||||
listener as such:
|
||||
|
||||
```yaml
|
||||
listeners:
|
||||
- port: 8008
|
||||
tls: false
|
||||
type: http
|
||||
x_forwarded: true
|
||||
bind_addresses: ['::1', '127.0.0.1']
|
||||
|
||||
resources:
|
||||
# added "metrics" in this line
|
||||
- names: [client, federation, metrics]
|
||||
compress: false
|
||||
resources:
|
||||
- names:
|
||||
- client
|
||||
- metrics
|
||||
```
|
||||
|
||||
This provides a simple way of adding metrics to your Synapse
|
||||
@@ -44,24 +37,14 @@
|
||||
to just internal networks easier. The served metrics are available
|
||||
over HTTP only, and will be available at `/_synapse/metrics`.
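
Once such a listener is configured (see the example below), a quick way to confirm the endpoint is serving data is to request it directly. A minimal sketch, assuming the listener is bound to port 9000 on localhost:

```python
# Illustrative check of a local metrics listener; the host and port are
# assumptions matching the example configuration below.
from urllib.request import urlopen

with urlopen("http://127.0.0.1:9000/_synapse/metrics", timeout=5) as response:
    body = response.read().decode("utf-8")

# Metrics are served in the Prometheus text exposition format, one sample
# per line; print the first few lines as a sanity check.
print("\n".join(body.splitlines()[:5]))
```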
|
||||
|
||||
Add a new listener to homeserver.yaml as in this example:
|
||||
Add a new listener to homeserver.yaml:
|
||||
|
||||
```yaml
|
||||
listeners:
|
||||
- port: 8008
|
||||
tls: false
|
||||
type: http
|
||||
x_forwarded: true
|
||||
bind_addresses: ['::1', '127.0.0.1']
|
||||
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
compress: false
|
||||
|
||||
# beginning of the new metrics listener
|
||||
- port: 9000
|
||||
type: metrics
|
||||
bind_addresses: ['::1', '127.0.0.1']
|
||||
listeners:
|
||||
- type: metrics
|
||||
port: 9000
|
||||
bind_addresses:
|
||||
- '0.0.0.0'
|
||||
```
|
||||
|
||||
1. Restart Synapse.
|
||||
@@ -209,9 +192,6 @@ altogether in Synapse v1.73.0.**
|
||||
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
|
||||
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
|
||||
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
|
||||
| synapse_admin_mau_current | synapse_admin_mau:current |
|
||||
| synapse_admin_mau_max | synapse_admin_mau:max |
|
||||
| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |
|
||||
|
||||
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
|
||||
---------------------------------------------------------------------------------
|
||||
|
||||
@@ -103,9 +103,6 @@ Called during a logout request for a user. It is passed the qualified user ID, t
|
||||
deactivated device (if any: access tokens are occasionally created without an associated
|
||||
device ID), and the (now deactivated) access token.
|
||||
|
||||
Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
|
||||
to still be present.
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
### `get_username_for_registration`
|
||||
|
||||
@@ -307,8 +307,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
|
||||
|
||||
```python
|
||||
async def check_media_file_for_spam(
|
||||
file_wrapper: "synapse.media.media_storage.ReadableFileWrapper",
|
||||
file_info: "synapse.media._base.FileInfo",
|
||||
file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
|
||||
file_info: "synapse.rest.media.v1._base.FileInfo",
|
||||
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
|
||||
```
|
||||
|
||||
|
||||
@@ -146,9 +146,6 @@ Note that this callback is called when the event has already been processed and
|
||||
into the room, which means this callback cannot be used to deny persisting the event. To
|
||||
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
|
||||
|
||||
For any given event, this callback will be called on every worker process, even if that worker will not end up
|
||||
acting on that event. This callback will not be called for events that are marked as rejected.
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
### `check_can_shutdown_room`
|
||||
@@ -254,11 +251,6 @@ If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
_First introduced in Synapse v1.56.0_
|
||||
|
||||
**<span style="color:red">
|
||||
This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which
|
||||
features the same functionality. The only difference is in name.
|
||||
</span>**
|
||||
|
||||
```python
|
||||
async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
|
||||
```
|
||||
@@ -273,44 +265,6 @@ server_.
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
### `on_add_user_third_party_identifier`
|
||||
|
||||
_First introduced in Synapse v1.79.0_
|
||||
|
||||
```python
|
||||
async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
|
||||
```
|
||||
|
||||
Called after successfully creating an association between a user and a third-party identifier
|
||||
(email address, phone number). The module is given the Matrix ID of the user the
|
||||
association is for, as well as the medium (`email` or `msisdn`) and address of the
|
||||
third-party identifier (i.e. an email address).
|
||||
|
||||
Note that this callback is _not_ called if a user attempts to bind their third-party identifier
|
||||
to an identity server (via a call to [`POST
|
||||
/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)).
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
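
For illustration, a minimal module might react to this callback as sketched below. This assumes the callback is registered through `register_third_party_rules_callbacks`, following the `register_[...]_callbacks` convention described on the modules page; the class name and logging are invented for the example.

```python
import logging
from typing import Any

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)


class ThirdPartyIdentifierAuditor:
    """Toy example: log each new third-party identifier association."""

    def __init__(self, config: Any, api: ModuleApi):
        self.api = api
        # Assumed registration call, mirroring how other callbacks in this
        # category are registered.
        self.api.register_third_party_rules_callbacks(
            on_add_user_third_party_identifier=self.on_add_user_third_party_identifier,
        )

    async def on_add_user_third_party_identifier(
        self, user_id: str, medium: str, address: str
    ) -> None:
        logger.info("User %s associated %s %s", user_id, medium, address)
```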
|
||||
|
||||
### `on_remove_user_third_party_identifier`
|
||||
|
||||
_First introduced in Synapse v1.79.0_
|
||||
|
||||
```python
|
||||
async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
|
||||
```
|
||||
|
||||
Called after successfully removing an association between a user and a third-party identifier
|
||||
(email address, phone number). The module is given the Matrix ID of the user the
|
||||
association is for, as well as the medium (`email` or `msisdn`) and address of the
|
||||
third-party identifier (i.e. an email address).
|
||||
|
||||
Note that this callback is _not_ called if a user attempts to unbind their third-party
|
||||
identifier from an identity server (via a call to [`POST
|
||||
/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)).
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
## Example
|
||||
|
||||
The example below is a module that implements the third-party rules callback
|
||||
@@ -343,4 +297,4 @@ class EventCensorer:
|
||||
)
|
||||
event_dict["content"] = new_event_content
|
||||
return event_dict
|
||||
```
|
||||
```
|
||||
|
||||
@@ -59,8 +59,8 @@ namespace (such as anything under `/_matrix/client` for example). It is strongly
|
||||
recommended that modules register their web resources under the `/_synapse/client`
|
||||
namespace.
|
||||
|
||||
The provided resource is a Python class that implements Twisted's [IResource](https://docs.twistedmatrix.com/en/stable/api/twisted.web.resource.IResource.html)
|
||||
interface (such as [Resource](https://docs.twistedmatrix.com/en/stable/api/twisted.web.resource.Resource.html)).
|
||||
The provided resource is a Python class that implements Twisted's [IResource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.IResource.html)
|
||||
interface (such as [Resource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.Resource.html)).
|
||||
|
||||
Only one resource can be registered for a given path. If several modules attempt to
|
||||
register a resource for the same path, the module that appears first in Synapse's
|
||||
@@ -82,60 +82,4 @@ the callback name as the argument name and the function as its value. A
|
||||
`register_[...]_callbacks` method exists for each category.
|
||||
|
||||
Callbacks for each category can be found on their respective page of the
|
||||
[Synapse documentation website](https://matrix-org.github.io/synapse).
|
||||
|
||||
## Caching
|
||||
|
||||
_Added in Synapse 1.74.0._
|
||||
|
||||
Modules can leverage Synapse's caching tools to manage their own cached functions. This
|
||||
can be helpful for modules that need to repeatedly request the same data from the database
|
||||
or a remote service.
|
||||
|
||||
Functions that need to be wrapped with a cache need to be decorated with a `@cached()`
|
||||
decorator (which can be imported from `synapse.module_api`) and registered with the
|
||||
[`ModuleApi.register_cached_function`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L888)
|
||||
API when initialising the module. If the module needs to invalidate an entry in a cache,
|
||||
it needs to use the [`ModuleApi.invalidate_cache`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L904)
|
||||
API, with the function to invalidate the cache of and the key(s) of the entry to
|
||||
invalidate.
|
||||
|
||||
Below is an example of a simple module using a cached function:
|
||||
|
||||
```python
|
||||
from typing import Any
|
||||
from synapse.module_api import cached, ModuleApi
|
||||
|
||||
class MyModule:
|
||||
def __init__(self, config: Any, api: ModuleApi):
|
||||
self.api = api
|
||||
|
||||
# Register the cached function so Synapse knows how to correctly invalidate
|
||||
# entries for it.
|
||||
self.api.register_cached_function(self.get_user_from_id)
|
||||
|
||||
@cached()
|
||||
async def get_department_for_user(self, user_id: str) -> str:
|
||||
"""A function with a cache."""
|
||||
# Request a department from an external service.
|
||||
response = await self.http_client.get_json(
    "https://int.example.com/users", {"user_id": user_id}
)
return response["department"]
|
||||
|
||||
async def do_something_with_users(self) -> None:
|
||||
"""Calls the cached function and then invalidates an entry in its cache."""
|
||||
|
||||
user_id = "@alice:example.com"
|
||||
|
||||
# Get the user. Since get_department_for_user is wrapped with a cache,
|
||||
# the return value for this user_id will be cached.
|
||||
department = await self.get_department_for_user(user_id)
|
||||
|
||||
# Do something with `department`...
|
||||
|
||||
# Let's say something has changed with our user, and the entry we have for
|
||||
# them in the cache is out of date, so we want to invalidate it.
|
||||
await self.api.invalidate_cache(self.get_department_for_user, (user_id,))
|
||||
```
|
||||
|
||||
See the [`cached` docstring](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L190) for more details.
|
||||
[Synapse documentation website](https://matrix-org.github.io/synapse).
|
||||
docs/openid.md
@@ -49,13 +49,6 @@ setting in your configuration file.
|
||||
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
|
||||
the text below for example configurations for specific providers.
|
||||
|
||||
## OIDC Back-Channel Logout
|
||||
|
||||
Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
|
||||
|
||||
This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user session.
|
||||
This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
|
||||
|
||||
## Sample configs
|
||||
|
||||
Here are a few configs for providers that should work with Synapse.
|
||||
@@ -88,43 +81,93 @@ oidc_providers:
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### Apple
|
||||
### Dex
|
||||
|
||||
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
|
||||
[Dex][dex-idp] is a simple, open-source OpenID Connect Provider.
|
||||
Although it is designed to help building a full-blown provider with an
|
||||
external database, it can be configured with static passwords in a config file.
|
||||
|
||||
You will need to create a new "Services ID" for SiWA, and create and download a
|
||||
private key with "SiWA" enabled.
|
||||
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
|
||||
to install Dex.
|
||||
|
||||
As well as the private key file, you will need:
|
||||
* Client ID: the "identifier" you gave the "Services ID"
|
||||
* Team ID: a 10-character ID associated with your developer account.
|
||||
* Key ID: the 10-character identifier for the key.
|
||||
|
||||
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
|
||||
has more information on setting up SiWA.
|
||||
|
||||
The synapse config will look like this:
|
||||
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
|
||||
|
||||
```yaml
|
||||
- idp_id: apple
|
||||
idp_name: Apple
|
||||
issuer: "https://appleid.apple.com"
|
||||
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
|
||||
client_auth_method: "client_secret_post"
|
||||
client_secret_jwt_key:
|
||||
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
|
||||
jwt_header:
|
||||
alg: ES256
|
||||
kid: "KEYIDCODE" # Set to the 10-char Key ID
|
||||
jwt_payload:
|
||||
iss: TEAMIDCODE # Set to the 10-char Team ID
|
||||
scopes: ["name", "email", "openid"]
|
||||
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
|
||||
user_mapping_provider:
|
||||
config:
|
||||
email_template: "{{ user.email }}"
|
||||
staticClients:
|
||||
- id: synapse
|
||||
secret: secret
|
||||
redirectURIs:
|
||||
- '[synapse public baseurl]/_synapse/client/oidc/callback'
|
||||
name: 'Synapse'
|
||||
```
|
||||
|
||||
Run with `dex serve examples/config-dev.yaml`.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: dex
|
||||
idp_name: "My Dex server"
|
||||
skip_verification: true # This is needed as Dex is served on an insecure endpoint
|
||||
issuer: "http://127.0.0.1:5556/dex"
|
||||
client_id: "synapse"
|
||||
client_secret: "secret"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.name }}"
|
||||
display_name_template: "{{ user.name|capitalize }}"
|
||||
```
|
||||
### Keycloak
|
||||
|
||||
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
|
||||
|
||||
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
|
||||
|
||||
1. Click `Clients` in the sidebar and click `Create`
|
||||
|
||||
2. Fill in the fields as below:
|
||||
|
||||
| Field | Value |
|
||||
|-----------|-----------|
|
||||
| Client ID | `synapse` |
|
||||
| Client Protocol | `openid-connect` |
|
||||
|
||||
3. Click `Save`
|
||||
4. Fill in the fields as below:
|
||||
|
||||
| Field | Value |
|
||||
|-----------|-----------|
|
||||
| Client ID | `synapse` |
|
||||
| Enabled | `On` |
|
||||
| Client Protocol | `openid-connect` |
|
||||
| Access Type | `confidential` |
|
||||
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
|
||||
|
||||
5. Click `Save`
|
||||
6. On the Credentials tab, update the fields:
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Client Authenticator | `Client ID and Secret` |
|
||||
|
||||
7. Click `Regenerate Secret`
|
||||
8. Copy Secret
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: keycloak
|
||||
idp_name: "My KeyCloak server"
|
||||
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
### Auth0
|
||||
|
||||
[Auth0][auth0] is a hosted SaaS IdP solution.
|
||||
@@ -205,43 +248,284 @@ oidc_providers:
|
||||
display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
|
||||
```
|
||||
|
||||
### Dex
|
||||
### LemonLDAP
|
||||
|
||||
[Dex][dex-idp] is a simple, open-source OpenID Connect Provider.
|
||||
Although it is designed to help building a full-blown provider with an
|
||||
external database, it can be configured with static passwords in a config file.
|
||||
[LemonLDAP::NG][lemonldap] is an open-source IdP solution.
|
||||
|
||||
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
|
||||
to install Dex.
|
||||
|
||||
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
|
||||
1. Create an OpenID Connect Relying Party in LemonLDAP::NG
|
||||
2. The parameters are:
|
||||
- Client ID under the basic menu of the new Relying Parties (`Options > Basic >
|
||||
Client ID`)
|
||||
- Client secret (`Options > Basic > Client secret`)
|
||||
- JWT Algorithm: RS256 within the security menu of the new Relying Parties
|
||||
(`Options > Security > ID Token signature algorithm` and `Options > Security >
|
||||
Access Token signature algorithm`)
|
||||
- Scopes: OpenID, Email and Profile
|
||||
- Allowed redirection addresses for login (`Options > Basic > Allowed
|
||||
redirection addresses for login`):
|
||||
`[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
```yaml
|
||||
staticClients:
|
||||
- id: synapse
|
||||
secret: secret
|
||||
redirectURIs:
|
||||
- '[synapse public baseurl]/_synapse/client/oidc/callback'
|
||||
name: 'Synapse'
|
||||
oidc_providers:
|
||||
- idp_id: lemonldap
|
||||
idp_name: lemonldap
|
||||
discover: true
|
||||
issuer: "https://auth.example.org/" # TO BE FILLED: replace with your domain
|
||||
client_id: "your client id" # TO BE FILLED
|
||||
client_secret: "your client secret" # TO BE FILLED
|
||||
scopes:
|
||||
- "openid"
|
||||
- "profile"
|
||||
- "email"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
# TO BE FILLED: If your users have names in LemonLDAP::NG and you want those in Synapse, this should be replaced with user.name|capitalize or any valid filter.
|
||||
display_name_template: "{{ user.preferred_username|capitalize }}"
|
||||
```
|
||||
|
||||
Run with `dex serve examples/config-dev.yaml`.
|
||||
### GitHub
|
||||
|
||||
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
|
||||
just a regular OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: dex
|
||||
idp_name: "My Dex server"
|
||||
skip_verification: true # This is needed as Dex is served on an insecure endpoint
|
||||
issuer: "http://127.0.0.1:5556/dex"
|
||||
client_id: "synapse"
|
||||
client_secret: "secret"
|
||||
scopes: ["openid", "profile"]
|
||||
- idp_id: github
|
||||
idp_name: Github
|
||||
idp_brand: "github" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://github.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
authorization_endpoint: "https://github.com/login/oauth/authorize"
|
||||
token_endpoint: "https://github.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://api.github.com/user"
|
||||
scopes: ["read:user"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.name }}"
|
||||
display_name_template: "{{ user.name|capitalize }}"
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### Google
|
||||
|
||||
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
|
||||
|
||||
1. Set up a project in the Google API Console (see
|
||||
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
|
||||
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
|
||||
4. Copy the Client ID and Client Secret, and add the following to your synapse config:
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: google
|
||||
idp_name: Google
|
||||
idp_brand: "google" # optional: styling hint for clients
|
||||
issuer: "https://accounts.google.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.given_name|lower }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
4. Back in the Google console, add this Authorized redirect URI: `[synapse
|
||||
public baseurl]/_synapse/client/oidc/callback`.
|
||||
|
||||
### Twitch
|
||||
|
||||
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)
|
||||
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
|
||||
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: twitch
|
||||
idp_name: Twitch
|
||||
issuer: "https://id.twitch.tv/oauth2/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### GitLab
|
||||
|
||||
1. Create a [new application](https://gitlab.com/profile/applications).
|
||||
2. Add the `read_user` and `openid` scopes.
|
||||
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitlab
|
||||
idp_name: Gitlab
|
||||
idp_brand: "gitlab" # optional: styling hint for clients
|
||||
issuer: "https://gitlab.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
scopes: ["openid", "read_user"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: '{{ user.nickname }}'
|
||||
display_name_template: '{{ user.name }}'
|
||||
```
|
||||
|
||||
### Facebook
|
||||
|
||||
0. You will need a Facebook developer account. You can register for one
|
||||
[here](https://developers.facebook.com/async/registration/).
|
||||
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
|
||||
console, "Create App", and choose "Build Connected Experiences".
|
||||
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
|
||||
need to go through the whole form here.
|
||||
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
|
||||
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
|
||||
URL.
|
||||
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
|
||||
and "App Secret" for use below.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
- idp_id: facebook
|
||||
idp_name: Facebook
|
||||
idp_brand: "facebook" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://www.facebook.com"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "email"]
|
||||
authorization_endpoint: "https://facebook.com/dialog/oauth"
|
||||
token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
|
||||
jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
display_name_template: "{{ user.name }}"
|
||||
email_template: "{{ '{{ user.email }}' }}"
|
||||
```
|
||||
|
||||
Relevant documents:
|
||||
* [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
|
||||
* [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
|
||||
* [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
|
||||
|
||||
Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
|
||||
but it has a `response_types_supported` which excludes "code" (which we rely on, and
|
||||
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
|
||||
so we have to disable discovery and configure the URIs manually.
|
||||
|
||||
### Gitea
|
||||
|
||||
Gitea is, like Github, not an OpenID provider, but just an OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new application.
|
||||
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitea
|
||||
idp_name: Gitea
|
||||
discover: false
|
||||
issuer: "https://your-gitea.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: client_secret_post
|
||||
scopes: [] # Gitea doesn't support Scopes
|
||||
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
|
||||
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.full_name }}"
|
||||
```
|
||||
|
||||
### XWiki
|
||||
|
||||
Install [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: xwiki
|
||||
idp_name: "XWiki"
|
||||
issuer: "https://myxwikihost/xwiki/oidc/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_auth_method: none
|
||||
scopes: ["openid", "profile"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### Apple
|
||||
|
||||
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
|
||||
|
||||
You will need to create a new "Services ID" for SiWA, and create and download a
|
||||
private key with "SiWA" enabled.
|
||||
|
||||
As well as the private key file, you will need:
|
||||
* Client ID: the "identifier" you gave the "Services ID"
|
||||
* Team ID: a 10-character ID associated with your developer account.
|
||||
* Key ID: the 10-character identifier for the key.
|
||||
|
||||
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
|
||||
has more information on setting up SiWA.
|
||||
|
||||
The synapse config will look like this:
|
||||
|
||||
```yaml
|
||||
- idp_id: apple
|
||||
idp_name: Apple
|
||||
issuer: "https://appleid.apple.com"
|
||||
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
|
||||
client_auth_method: "client_secret_post"
|
||||
client_secret_jwt_key:
|
||||
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
|
||||
jwt_header:
|
||||
alg: ES256
|
||||
kid: "KEYIDCODE" # Set to the 10-char Key ID
|
||||
jwt_payload:
|
||||
iss: TEAMIDCODE # Set to the 10-char Team ID
|
||||
scopes: ["name", "email", "openid"]
|
||||
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
|
||||
user_mapping_provider:
|
||||
config:
|
||||
email_template: "{{ user.email }}"
|
||||
```
|
||||
|
||||
### Django OAuth Toolkit
|
||||
@@ -291,420 +575,3 @@ oidc_providers:
|
||||
display_name_template: "{{ user.first_name }} {{ user.last_name }}"
|
||||
email_template: "{{ user.email }}"
|
||||
```
|
||||
|
||||
### Facebook
|
||||
|
||||
0. You will need a Facebook developer account. You can register for one
|
||||
[here](https://developers.facebook.com/async/registration/).
|
||||
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
|
||||
console, "Create App", and choose "Build Connected Experiences".
|
||||
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
|
||||
need to go through the whole form here.
|
||||
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
|
||||
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
|
||||
URL.
|
||||
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
|
||||
and "App Secret" for use below.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
- idp_id: facebook
|
||||
idp_name: Facebook
|
||||
idp_brand: "facebook" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://www.facebook.com"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "email"]
|
||||
authorization_endpoint: "https://facebook.com/dialog/oauth"
|
||||
token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
|
||||
jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
display_name_template: "{{ user.name }}"
|
||||
email_template: "{{ user.email }}"
|
||||
```
|
||||
|
||||
Relevant documents:
|
||||
* [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
|
||||
* [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
|
||||
* [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
|
||||
|
||||
Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
|
||||
but it has a `response_types_supported` which excludes "code" (which we rely on, and
|
||||
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
|
||||
so we have to disable discovery and configure the URIs manually.
|
||||
|
||||
### GitHub
|
||||
|
||||
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
|
||||
just a regular OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: github
|
||||
idp_name: Github
|
||||
idp_brand: "github" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://github.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
authorization_endpoint: "https://github.com/login/oauth/authorize"
|
||||
token_endpoint: "https://github.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://api.github.com/user"
|
||||
scopes: ["read:user"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### GitLab
|
||||
|
||||
1. Create a [new application](https://gitlab.com/profile/applications).
|
||||
2. Add the `read_user` and `openid` scopes.
|
||||
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitlab
|
||||
idp_name: Gitlab
|
||||
idp_brand: "gitlab" # optional: styling hint for clients
|
||||
issuer: "https://gitlab.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
scopes: ["openid", "read_user"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: '{{ user.nickname }}'
|
||||
display_name_template: '{{ user.name }}'
|
||||
```
|
||||
|
||||
### Gitea
|
||||
|
||||
Gitea is, like GitHub, not an OpenID Connect provider, but just a regular OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new application.
|
||||
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitea
|
||||
idp_name: Gitea
|
||||
discover: false
|
||||
issuer: "https://your-gitea.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: client_secret_post
|
||||
scopes: [] # Gitea doesn't support Scopes
|
||||
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
|
||||
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.full_name }}"
|
||||
```
|
||||
|
||||
### Google
|
||||
|
||||
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
|
||||
|
||||
1. Set up a project in the Google API Console (see
|
||||
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
|
||||
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
|
||||
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: google
|
||||
idp_name: Google
|
||||
idp_brand: "google" # optional: styling hint for clients
|
||||
issuer: "https://accounts.google.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile", "email"] # email is optional, read below
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.given_name|lower }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
email_template: "{{ user.email }}" # needs "email" in scopes above
|
||||
```
|
||||
4. Back in the Google console, add this Authorized redirect URI: `[synapse
|
||||
public baseurl]/_synapse/client/oidc/callback`.
|
||||
|
||||
### Keycloak
|
||||
|
||||
[Keycloak][keycloak-idp] is an open-source IdP maintained by Red Hat.
|
||||
|
||||
Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse so that Synapse users are logged out when they log out of Keycloak.
|
||||
This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
|
||||
|
||||
Follow the [Getting Started Guide](https://www.keycloak.org/guides) to install Keycloak and set up a realm.
|
||||
|
||||
1. Click `Clients` in the sidebar and click `Create`
|
||||
|
||||
2. Fill in the fields as below:
|
||||
|
||||
| Field | Value |
|
||||
|-----------|-----------|
|
||||
| Client ID | `synapse` |
|
||||
| Client Protocol | `openid-connect` |
|
||||
|
||||
3. Click `Save`
|
||||
4. Fill in the fields as below:
|
||||
|
||||
| Field | Value |
|
||||
|-----------|-----------|
|
||||
| Client ID | `synapse` |
|
||||
| Enabled | `On` |
|
||||
| Client Protocol | `openid-connect` |
|
||||
| Access Type | `confidential` |
|
||||
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
|
||||
| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
|
||||
| Backchannel Logout Session Required (optional) | `On` |
|
||||
|
||||
5. Click `Save`
|
||||
6. On the Credentials tab, update the fields:
|
||||
|
||||
| Field | Value |
|
||||
|-------|-------|
|
||||
| Client Authenticator | `Client ID and Secret` |
|
||||
|
||||
7. Click `Regenerate Secret`
|
||||
8. Copy Secret
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: keycloak
|
||||
idp_name: "My KeyCloak server"
|
||||
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
backchannel_logout_enabled: true # Optional
|
||||
```
|
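As an optional check (not part of the official guide), you can confirm the `issuer` value by fetching the realm's OIDC discovery document, which Synapse reads during discovery:

```sh
# Replace {realm_name} with your realm, as in the issuer above.
# The returned JSON should contain an "issuer" field matching the configured value.
# Add -k if your local Keycloak uses a self-signed certificate.
curl https://127.0.0.1:8443/realms/{realm_name}/.well-known/openid-configuration
```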
||||
|
||||
### LemonLDAP
|
||||
|
||||
[LemonLDAP::NG][lemonldap] is an open-source IdP solution.
|
||||
|
||||
1. Create an OpenID Connect Relying Party in LemonLDAP::NG
|
||||
2. The parameters are:
|
||||
- Client ID under the basic menu of the new Relying Party (`Options > Basic >
|
||||
Client ID`)
|
||||
- Client secret (`Options > Basic > Client secret`)
|
||||
- JWT Algorithm: RS256 within the security menu of the new Relying Party
|
||||
(`Options > Security > ID Token signature algorithm` and `Options > Security >
|
||||
Access Token signature algorithm`)
|
||||
- Scopes: OpenID, Email and Profile
|
||||
- Allowed redirection addresses for login (`Options > Basic > Allowed
|
||||
redirection addresses for login` ) :
|
||||
`[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: lemonldap
|
||||
idp_name: lemonldap
|
||||
discover: true
|
||||
issuer: "https://auth.example.org/" # TO BE FILLED: replace with your domain
|
||||
client_id: "your client id" # TO BE FILLED
|
||||
client_secret: "your client secret" # TO BE FILLED
|
||||
scopes:
|
||||
- "openid"
|
||||
- "profile"
|
||||
- "email"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}}"
|
||||
# TO BE FILLED: If your users have names in LemonLDAP::NG and you want those in Synapse, this should be replaced with user.name|capitalize or any valid filter.
|
||||
display_name_template: "{{ user.preferred_username|capitalize }}"
|
||||
```
|
||||
|
||||
### Mastodon
|
||||
|
||||
[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.
|
||||
|
||||
The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using curl.
|
||||
|
||||
This example assumes that:
|
||||
* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
|
||||
* Synapse will be registered as an app named `my_synapse_app`.
|
||||
|
||||
Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
|
||||
```sh
|
||||
curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
|
||||
```
|
||||
|
||||
You should receive a response similar to the following. Make sure to save it.
|
||||
```json
|
||||
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
|
||||
```
|
||||
|
||||
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: my_mastodon
|
||||
idp_name: "Mastodon Instance Example"
|
||||
discover: false
|
||||
issuer: "https://your.mastodon.instance.url/@admin"
|
||||
client_id: "someclientid_123"
|
||||
client_secret: "someclientsecret_123"
|
||||
authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
|
||||
token_endpoint: "https://your.mastodon.instance.url/oauth/token"
|
||||
userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
|
||||
scopes: ["read"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
```
|
||||
|
||||
Note that the fields `client_id` and `client_secret` are taken from the curl response above.
|
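If you would like to inspect the fields returned by the userinfo endpoint (and hence what `subject_claim` can point at), you can optionally call it directly; the access token below is a placeholder for a real Mastodon user token:

```sh
# Optional: inspect the account object; its "id" field is what subject_claim uses.
curl -H "Authorization: Bearer A_MASTODON_USER_ACCESS_TOKEN" \
  https://your.mastodon.instance.url/api/v1/accounts/verify_credentials
```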
||||
|
||||
### Shibboleth with OIDC Plugin
|
||||
|
||||
[Shibboleth](https://www.shibboleth.net/) is an open-standard IdP solution widely used by universities.
|
||||
|
||||
1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly.
|
||||
2. Create a new config on the IdP side, ensuring that the `client_id` and `client_secret`
|
||||
are randomly generated values.
|
||||
```json
|
||||
{
|
||||
"client_id": "SOME-CLIENT-ID",
|
||||
"client_secret": "SOME-SUPER-SECRET-SECRET",
|
||||
"response_types": ["code"],
|
||||
"grant_types": ["authorization_code"],
|
||||
"scope": "openid profile email",
|
||||
"redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"]
|
||||
}
|
||||
```
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
# Shibboleth IDP
|
||||
#
|
||||
- idp_id: shibboleth
|
||||
idp_name: "Shibboleth Login"
|
||||
discover: true
|
||||
issuer: "https://YOUR-IDP-URL.TLD"
|
||||
client_id: "YOUR_CLIENT_ID"
|
||||
client_secret: "YOUR-CLIENT-SECRECT-FROM-YOUR-IDP"
|
||||
scopes: ["openid", "profile", "email"]
|
||||
allow_existing_users: true
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "sub"
|
||||
localpart_template: "{{ user.sub.split('@')[0] }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
email_template: "{{ user.email }}"
|
||||
```
|
||||
|
||||
### Twitch
|
||||
|
||||
1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
|
||||
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
|
||||
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: twitch
|
||||
idp_name: Twitch
|
||||
issuer: "https://id.twitch.tv/oauth2/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### Twitter
|
||||
|
||||
*Using Twitter as an identity provider requires using Synapse 1.75.0 or later.*
|
||||
|
||||
1. Set up a developer account on [Twitter](https://developer.twitter.com/en/portal/dashboard)
|
||||
2. Create a project & app.
|
||||
3. Enable user authentication and under "Type of App" choose "Web App, Automated App or Bot".
|
||||
4. Under "App info" set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
|
||||
5. Obtain the OAuth 2.0 credentials under the "Keys and tokens" tab, and copy the OAuth 2.0 Client ID and Client Secret.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: twitter
|
||||
idp_name: Twitter
|
||||
idp_brand: "twitter" # optional: styling hint for clients
|
||||
discover: false # Twitter is not OpenID compliant.
|
||||
issuer: "https://twitter.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
pkce_method: "always"
|
||||
# offline.access provides refresh tokens; tweet.read and users.read are needed for the userinfo request.
|
||||
scopes: ["offline.access", "tweet.read", "users.read"]
|
||||
authorization_endpoint: https://twitter.com/i/oauth2/authorize
|
||||
token_endpoint: https://api.twitter.com/2/oauth2/token
|
||||
userinfo_endpoint: https://api.twitter.com/2/users/me?user.fields=profile_image_url
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_template: "{{ user.data.id }}"
|
||||
localpart_template: "{{ user.data.username }}"
|
||||
display_name_template: "{{ user.data.name }}"
|
||||
picture_template: "{{ user.data.profile_image_url }}"
|
||||
```
|
||||
|
||||
### XWiki
|
||||
|
||||
Install the [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: xwiki
|
||||
idp_name: "XWiki"
|
||||
issuer: "https://myxwikihost/xwiki/oidc/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_auth_method: none
|
||||
scopes: ["openid", "profile"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Using Postgres
|
||||
|
||||
The minimum supported version of PostgreSQL is determined by the [Dependency
|
||||
Deprecation Policy](deprecation_policy.md).
|
||||
Synapse supports PostgreSQL versions 10 or later.
|
||||
|
||||
## Install postgres client libraries
|
||||
|
||||
@@ -16,7 +15,7 @@ connect to a postgres database.
|
||||
- For other pre-built packages, please consult the documentation from
|
||||
the relevant package.
|
||||
- If you installed synapse [in a
|
||||
virtualenv](setup/installation.md#installing-as-a-python-module-from-pypi), you can install
|
||||
virtualenv](setup/installation.md#installing-from-source), you can install
|
||||
the library with:
|
||||
|
||||
~/synapse/env/bin/pip install "matrix-synapse[postgres]"
|
||||
|
||||
@@ -46,7 +46,7 @@ when using a containerized Synapse, as that will prevent it from responding
|
||||
to proxied traffic.)
|
||||
|
||||
Optionally, you can also set
|
||||
[`request_id_header`](./usage/configuration/config_documentation.md#listeners)
|
||||
[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
|
||||
so that the server extracts and re-uses the same request ID format that the
|
||||
reverse proxy is using.
|
||||
|
||||
@@ -79,9 +79,6 @@ server {
|
||||
# Nginx by default only allows file uploads up to 1M in size
|
||||
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
|
||||
client_max_body_size 50M;
|
||||
|
||||
# Synapse responses may be chunked, which is an HTTP/1.1 feature.
|
||||
proxy_http_version 1.1;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
# Synapse also supports structured logging for machine readable logs which can
|
||||
# be ingested by ELK stacks. See [2] for details.
|
||||
#
|
||||
# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
|
||||
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
|
||||
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
|
||||
|
||||
version: 1
|
||||
|
||||
@@ -26,8 +26,8 @@ for most users.
|
||||
#### Docker images and Ansible playbooks
|
||||
|
||||
There is an official synapse image available at
|
||||
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
|
||||
which can be used with the docker-compose file available at
|
||||
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
|
||||
the docker-compose file available at
|
||||
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
|
||||
Further information on this including configuration options is available in the README
|
||||
on hub.docker.com.
|
||||
@@ -84,9 +84,7 @@ file when you upgrade the Debian package to a later version.
|
||||
|
||||
##### Downstream Debian packages
|
||||
|
||||
Andrej Shadura maintains a
|
||||
[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
|
||||
the Debian repositories.
|
||||
Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
|
||||
For `bookworm` and `sid`, it can be installed simply with:
|
||||
|
||||
```sh
|
||||
@@ -102,27 +100,23 @@ for information on how to use backports.
|
||||
##### Downstream Ubuntu packages
|
||||
|
||||
We do not recommend using the packages in the default Ubuntu repository
|
||||
at this time, as they are [old and suffer from known security vulnerabilities](
|
||||
https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
|
||||
).
|
||||
at this time, as they are old and suffer from known security vulnerabilities.
|
||||
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
|
||||
|
||||
#### Fedora
|
||||
|
||||
Synapse is in the Fedora repositories as
|
||||
[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):
|
||||
Synapse is in the Fedora repositories as `matrix-synapse`:
|
||||
|
||||
```sh
|
||||
sudo dnf install matrix-synapse
|
||||
```
|
||||
|
||||
Additionally, Oleg Girko provides Fedora RPMs at
|
||||
Oleg Girko provides Fedora RPMs at
|
||||
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
|
||||
|
||||
#### OpenSUSE
|
||||
|
||||
Synapse is in the OpenSUSE repositories as
|
||||
[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):
|
||||
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
|
||||
|
||||
```sh
|
||||
sudo zypper install matrix-synapse
|
||||
@@ -136,7 +130,7 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi
|
||||
#### ArchLinux
|
||||
|
||||
The quickest way to get up and running with ArchLinux is probably with the community package
|
||||
<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
|
||||
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
|
||||
the necessary dependencies.
|
||||
|
||||
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
|
||||
@@ -157,8 +151,7 @@ sudo pip install py-bcrypt
|
||||
|
||||
#### Void Linux
|
||||
|
||||
Synapse can be found in the void repositories as
|
||||
['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):
|
||||
Synapse can be found in the void repositories as 'synapse':
|
||||
|
||||
```sh
|
||||
xbps-install -Su
|
||||
@@ -200,7 +193,7 @@ When following this route please make sure that the [Platform-specific prerequis
|
||||
System requirements:
|
||||
|
||||
- POSIX-compliant system (tested on Linux & OS X)
|
||||
- Python 3.7 or later, up to Python 3.11.
|
||||
- Python 3.7 or later, up to Python 3.10.
|
||||
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
||||
|
||||
If building on an uncommon architecture for which pre-built wheels are
|
||||
@@ -278,7 +271,7 @@ Installing prerequisites on Ubuntu or Debian:
|
||||
```sh
|
||||
sudo apt install build-essential python3-dev libffi-dev \
|
||||
python3-pip python3-setuptools sqlite3 \
|
||||
libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev
|
||||
libssl-dev virtualenv libjpeg-dev libxslt1-dev
|
||||
```
|
||||
|
||||
##### ArchLinux
|
||||
@@ -287,7 +280,7 @@ Installing prerequisites on ArchLinux:
|
||||
|
||||
```sh
|
||||
sudo pacman -S base-devel python python-pip \
|
||||
python-setuptools python-virtualenv sqlite3 icu
|
||||
python-setuptools python-virtualenv sqlite3
|
||||
```
|
||||
|
||||
##### CentOS/Fedora
|
||||
@@ -297,8 +290,7 @@ Installing prerequisites on CentOS or Fedora Linux:
|
||||
```sh
|
||||
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
|
||||
python3-virtualenv libffi-devel openssl-devel python3-devel \
|
||||
libicu-devel
|
||||
python3-virtualenv libffi-devel openssl-devel python3-devel
|
||||
sudo dnf groupinstall "Development Tools"
|
||||
```
|
||||
|
||||
@@ -311,12 +303,8 @@ You may need to install the latest Xcode developer tools:
|
||||
xcode-select --install
|
||||
```
|
||||
|
||||
Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) for them.
|
||||
|
||||
You may need to install icu, and make the icu binaries and libraries accessible.
|
||||
Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.
|
||||
|
||||
On ARM-based Macs you may also need to install libjpeg and libpq:
|
||||
On ARM-based Macs you may need to install libjpeg and libpq.
|
||||
You can use Homebrew (https://brew.sh):
|
||||
```sh
|
||||
brew install jpeg libpq
|
||||
```
|
||||
@@ -337,8 +325,7 @@ Installing prerequisites on openSUSE:
|
||||
```sh
|
||||
sudo zypper in -t pattern devel_basis
|
||||
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
|
||||
python-devel libffi-devel libopenssl-devel libjpeg62-devel \
|
||||
libicu-devel
|
||||
python-devel libffi-devel libopenssl-devel libjpeg62-devel
|
||||
```
|
||||
|
||||
##### OpenBSD
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
# coturn TURN server
|
||||
|
||||
The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API).
|
||||
|
||||
## `coturn` setup
|
||||
|
||||
### Initial installation
|
||||
|
||||
The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
|
||||
|
||||
#### Debian and Ubuntu based distributions
|
||||
|
||||
Just install the debian package:
|
||||
|
||||
```sh
|
||||
sudo apt install coturn
|
||||
```
|
||||
|
||||
This will install and start a systemd service called `coturn`.
|
||||
|
||||
#### Source installation
|
||||
|
||||
1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
|
||||
|
||||
1. Configure it:
|
||||
|
||||
```sh
|
||||
./configure
|
||||
```
|
||||
|
||||
You may need to install `libevent2`: if so, you should do so in
|
||||
the way recommended by your operating system. You can ignore
|
||||
warnings about lack of database support: a database is unnecessary
|
||||
for this purpose.
|
||||
|
||||
1. Build and install it:
|
||||
|
||||
```sh
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
|
||||
lines, with example values, are:
|
||||
|
||||
```
|
||||
use-auth-secret
|
||||
static-auth-secret=[your secret key here]
|
||||
realm=turn.myserver.org
|
||||
```
|
||||
|
||||
See `turnserver.conf` for explanations of the options. One way to generate
|
||||
the `static-auth-secret` is with `pwgen`:
|
||||
|
||||
```sh
|
||||
pwgen -s 64 1
|
||||
```
|
||||
|
||||
A `realm` must be specified, but its value is somewhat arbitrary. (It is
|
||||
sent to clients as part of the authentication flow.) It is conventional to
|
||||
set it to be your server name.
|
||||
|
||||
1. You will most likely want to configure `coturn` to write logs somewhere. The
|
||||
easiest way is normally to send them to the syslog:
|
||||
|
||||
```sh
|
||||
syslog
|
||||
```
|
||||
|
||||
(in which case, the logs will be available via `journalctl -u coturn` on a
|
||||
systemd system). Alternatively, `coturn` can be configured to write to a
|
||||
logfile - check the example config file supplied with `coturn`.
|
||||
|
||||
1. Consider your security settings. TURN lets users request a relay which will
|
||||
connect to arbitrary IP addresses and ports. The following configuration is
|
||||
suggested as a minimum starting point:
|
||||
|
||||
```
|
||||
# VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
|
||||
no-tcp-relay
|
||||
|
||||
# don't let the relay ever try to connect to private IP address ranges within your network (if any)
|
||||
# given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
|
||||
denied-peer-ip=10.0.0.0-10.255.255.255
|
||||
denied-peer-ip=192.168.0.0-192.168.255.255
|
||||
denied-peer-ip=172.16.0.0-172.31.255.255
|
||||
|
||||
# recommended additional local peers to block, to mitigate external access to internal services.
|
||||
# https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
|
||||
no-multicast-peers
|
||||
denied-peer-ip=0.0.0.0-0.255.255.255
|
||||
denied-peer-ip=100.64.0.0-100.127.255.255
|
||||
denied-peer-ip=127.0.0.0-127.255.255.255
|
||||
denied-peer-ip=169.254.0.0-169.254.255.255
|
||||
denied-peer-ip=192.0.0.0-192.0.0.255
|
||||
denied-peer-ip=192.0.2.0-192.0.2.255
|
||||
denied-peer-ip=192.88.99.0-192.88.99.255
|
||||
denied-peer-ip=198.18.0.0-198.19.255.255
|
||||
denied-peer-ip=198.51.100.0-198.51.100.255
|
||||
denied-peer-ip=203.0.113.0-203.0.113.255
|
||||
denied-peer-ip=240.0.0.0-255.255.255.255
|
||||
|
||||
# special case the turn server itself so that client->TURN->TURN->client flows work
|
||||
# this should be one of the turn server's listening IPs
|
||||
allowed-peer-ip=10.0.0.1
|
||||
|
||||
# consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
|
||||
user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
|
||||
total-quota=1200
|
||||
```
|
||||
|
||||
1. Also consider supporting TLS/DTLS. To do this, add the following settings
|
||||
to `turnserver.conf`:
|
||||
|
||||
```
|
||||
# TLS certificates, including intermediate certs.
|
||||
# For Let's Encrypt certificates, use `fullchain.pem` here.
|
||||
cert=/path/to/fullchain.pem
|
||||
|
||||
# TLS private key file
|
||||
pkey=/path/to/privkey.pem
|
||||
|
||||
# Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
|
||||
#no-tls
|
||||
#no-dtls
|
||||
```
|
||||
|
||||
In this case, replace the `turn:` schemes in the `turn_uris` settings below
|
||||
with `turns:`.
|
||||
|
||||
We recommend that you only try to set up TLS/DTLS once you have set up a
|
||||
basic installation and got it working.
|
||||
|
||||
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
|
||||
not work with any Matrix client that uses Chromium's WebRTC library. This
|
||||
currently includes Element Android & iOS; for more details, see their
|
||||
[respective](https://github.com/vector-im/element-android/issues/1533)
|
||||
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
|
||||
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
|
||||
Consider using a ZeroSSL certificate for your TURN server as a working alternative.
|
||||
|
||||
1. Ensure your firewall allows traffic into the TURN server on the ports
|
||||
you've configured it to listen on (By default: 3478 and 5349 for TURN
|
||||
traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
|
||||
for the UDP relay.)
|
||||
|
||||
1. If your TURN server is behind NAT, the NAT gateway must have an external,
|
||||
publicly-reachable IP address. You must configure `coturn` to advertise that
|
||||
address to connecting clients:
|
||||
|
||||
```
|
||||
external-ip=EXTERNAL_NAT_IPv4_ADDRESS
|
||||
```
|
||||
|
||||
You may optionally limit the TURN server to listen only on the local
|
||||
address that is mapped by NAT to the external address:
|
||||
|
||||
```
|
||||
listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
|
||||
```
|
||||
|
||||
If your NAT gateway is reachable over both IPv4 and IPv6, you may
|
||||
configure `coturn` to advertise each available address:
|
||||
|
||||
```
|
||||
external-ip=EXTERNAL_NAT_IPv4_ADDRESS
|
||||
external-ip=EXTERNAL_NAT_IPv6_ADDRESS
|
||||
```
|
||||
|
||||
When advertising an external IPv6 address, ensure that the firewall and
|
||||
network settings of the system running your TURN server are configured to
|
||||
accept IPv6 traffic, and that the TURN server is listening on the local
|
||||
IPv6 address that is mapped by NAT to the external IPv6 address.
|
||||
|
||||
1. (Re)start the turn server:
|
||||
|
||||
* If you used the Debian package (or have set up a systemd unit yourself):
|
||||
```sh
|
||||
sudo systemctl restart coturn
|
||||
```
|
||||
|
||||
* If you built from source:
|
||||
|
||||
```sh
|
||||
/usr/local/bin/turnserver -o
|
||||
```
|
||||
@@ -1,170 +0,0 @@
|
||||
# eturnal TURN server
|
||||
|
||||
The following sections describe how to install [eturnal](<https://github.com/processone/eturnal>)
|
||||
(which implements the TURN REST API).
|
||||
|
||||
## `eturnal` setup
|
||||
|
||||
### Initial installation
|
||||
|
||||
The `eturnal` TURN server implementation is available from a variety of sources
|
||||
such as native package managers, binary packages, installation from source or
|
||||
[container image](https://eturnal.net/documentation/code/docker.html). They are
|
||||
all described [here](https://github.com/processone/eturnal#installation).
|
||||
|
||||
Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
|
||||
or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
|
||||
are available as well.
|
||||
|
||||
### Configuration
|
||||
|
||||
After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
|
||||
at `/etc/eturnal.yml` (or, if not found there, at
|
||||
`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
|
||||
format. The file contains further explanations.
|
||||
|
||||
Here are some hints on how to configure eturnal on your [host machine](https://github.com/processone/eturnal#configuration)
|
||||
or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
|
||||
You may also take a deeper dive into the [reference documentation](https://eturnal.net/documentation/).
|
||||
|
||||
`eturnal` runs out of the box with the default configuration. To enable TURN and
|
||||
to integrate it with your homeserver, some aspects in `eturnal`'s default configuration file
|
||||
must be edited:
|
||||
|
||||
1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
|
||||
and eturnal's shared `secret` for authentication
|
||||
|
||||
Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
|
||||
configuration file:
|
||||
|
||||
```yaml
|
||||
secret: "long-and-cryptic" # Shared secret, CHANGE THIS.
|
||||
```
|
||||
|
||||
One way to generate a `secret` is with `pwgen`:
|
||||
|
||||
```sh
|
||||
pwgen -s 64 1
|
||||
```
|
||||
|
||||
1. Public IP address
|
||||
|
||||
If your TURN server is behind NAT, the NAT gateway must have an external,
|
||||
publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
|
||||
however, it may also be configured by uncommenting and adjusting this line, so
|
||||
`eturnal` advertises that address to connecting clients:
|
||||
|
||||
```yaml
|
||||
relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
|
||||
```
|
||||
|
||||
If your NAT gateway is reachable over both IPv4 and IPv6, you may
|
||||
configure `eturnal` to advertise each available address:
|
||||
|
||||
```yaml
|
||||
relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
|
||||
relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
|
||||
```
|
||||
|
||||
When advertising an external IPv6 address, ensure that the firewall and
|
||||
network settings of the system running your TURN server are configured to
|
||||
accept IPv6 traffic, and that the TURN server is listening on the local
|
||||
IPv6 address that is mapped by NAT to the external IPv6 address.
|
||||
|
||||
1. Logging
|
||||
|
||||
If `eturnal` was started by systemd, log files are written into the
|
||||
`/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
|
||||
instead, the `log_dir` option can be set to `stdout` in the configuration file.
|
||||
|
||||
1. Security considerations
|
||||
|
||||
Consider your security settings. TURN lets users request a relay which will
|
||||
connect to arbitrary IP addresses and ports. The following configuration is
|
||||
suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):
|
||||
|
||||
```yaml
|
||||
## Reject TURN relaying from/to the following addresses/networks:
|
||||
blacklist: # This is the default blacklist.
|
||||
- "127.0.0.0/8" # IPv4 loopback.
|
||||
- "::1" # IPv6 loopback.
|
||||
- recommended # Expands to a number of networks recommended to be
|
||||
# blocked, but includes private networks. Those
|
||||
# would have to be 'whitelist'ed if eturnal serves
|
||||
# local clients/peers within such networks.
|
||||
```
|
||||
|
||||
To whitelist IP addresses or specific (private) networks, you need to **add** a
|
||||
whitelist part into the configuration file, e.g.:
|
||||
|
||||
```yaml
|
||||
whitelist:
|
||||
- "192.168.0.0/16"
|
||||
- "203.0.113.113"
|
||||
- "2001:db8::/64"
|
||||
```
|
||||
|
||||
The more specific, the better.
|
||||
|
||||
1. TURNS (TURN via TLS/DTLS)
|
||||
|
||||
Also consider supporting TLS/DTLS. To do this, adjust the following settings
|
||||
in the `eturnal.yml` configuration file (the TLS parts must no longer be commented out):
|
||||
|
||||
```yaml
|
||||
listen:
|
||||
- ip: "::"
|
||||
port: 3478
|
||||
transport: udp
|
||||
- ip: "::"
|
||||
port: 3478
|
||||
transport: tcp
|
||||
- ip: "::"
|
||||
port: 5349
|
||||
transport: tls
|
||||
|
||||
## TLS certificate/key files (must be readable by 'eturnal' user!):
|
||||
tls_crt_file: /etc/eturnal/tls/crt.pem
|
||||
tls_key_file: /etc/eturnal/tls/key.pem
|
||||
```
|
||||
|
||||
In this case, replace the `turn:` schemes in homeserver's `turn_uris` settings
|
||||
with `turns:`. More is described [here](../../usage/configuration/config_documentation.md#turn_uris).
|
||||
|
||||
We recommend that you only try to set up TLS/DTLS once you have set up a
|
||||
basic installation and got it working.
|
||||
|
||||
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
|
||||
not work with any Matrix client that uses Chromium's WebRTC library. This
|
||||
currently includes Element Android & iOS; for more details, see their
|
||||
[respective](https://github.com/vector-im/element-android/issues/1533)
|
||||
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
|
||||
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
|
||||
Consider using a ZeroSSL certificate for your TURN server as a working alternative.
|
||||
|
||||
1. Firewall
|
||||
|
||||
Ensure your firewall allows traffic into the TURN server on the ports
|
||||
you've configured it to listen on (By default: 3478 and 5349 for TURN
|
||||
traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
|
||||
for the UDP relay.)
|
||||
|
||||
1. Reloading/restarting `eturnal`
|
||||
|
||||
Changes in the configuration file require `eturnal` to be reloaded or restarted; this
|
||||
can be achieved by:
|
||||
|
||||
```sh
|
||||
eturnalctl reload
|
||||
```
|
||||
|
||||
`eturnal` performs a configuration check before actually reloading/restarting
|
||||
and provides hints if something is not configured correctly.
|
||||
|
||||
### eturnalctl operations script
|
||||
|
||||
`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
|
||||
which can be called e.g. to check whether the service is up, to restart the service,
|
||||
to query how many active sessions exist, to change logging behaviour and so on.
|
||||
|
||||
Hint: If `eturnalctl` is not part of your `$PATH`, consider either symlinking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly, e.g. `/opt/eturnal/bin/eturnalctl info`.
|
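For example, a few of the operations referenced elsewhere in this documentation:

```sh
eturnalctl info        # check whether the service is up
eturnalctl reload      # re-read eturnal.yml after configuration changes
eturnalctl credentials # generate TURN credentials from the shared secret
```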
||||
@@ -120,7 +120,7 @@ specified in the config. It is located at
|
||||
## SAML Mapping Providers
|
||||
|
||||
The SAML mapping provider can be customized by editing the
|
||||
[`saml2_config.user_mapping_provider.module`](usage/configuration/config_documentation.md#saml2_config)
|
||||
[`saml2_config.user_mapping_provider.module`](docs/usage/configuration/config_documentation.md#saml2_config)
|
||||
config option.
|
||||
|
||||
`saml2_config.user_mapping_provider.config` allows you to provide custom
|
||||
|
||||
@@ -17,7 +17,6 @@ worker_listeners:
|
||||
#
|
||||
#- type: http
|
||||
# port: 8035
|
||||
# x_forwarded: true
|
||||
# resources:
|
||||
# - names: [client]
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
worker_app: synapse.app.federation_sender
|
||||
worker_name: federation_sender1
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
|
||||
@@ -5,10 +5,11 @@ worker_name: generic_worker1
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_main_http_uri: http://localhost:8008/
|
||||
|
||||
worker_listeners:
|
||||
- type: http
|
||||
port: 8083
|
||||
x_forwarded: true
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ worker_replication_http_port: 9093
|
||||
worker_listeners:
|
||||
- type: http
|
||||
port: 8085
|
||||
x_forwarded: true
|
||||
resources:
|
||||
- names: [media]
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
worker_app: synapse.app.pusher
|
||||
worker_name: pusher_worker1
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
|
||||
@@ -25,7 +25,7 @@ position of all streams. The server then periodically sends `RDATA` commands
|
||||
which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
|
||||
the format of `<row>` is defined by the individual streams. The
|
||||
`<instance_name>` is the name of the Synapse process that generated the data
|
||||
(usually "master"). We expect an RDATA for every row in the DB.
|
||||
(usually "master").
|
||||
|
||||
Error reporting happens by either the client or server sending an ERROR
|
||||
command, and usually the connection will be closed.
|
||||
@@ -107,7 +107,7 @@ reconnect, following the steps above.
|
||||
If the server sends messages faster than the client can consume them the
|
||||
server will first buffer a (fairly large) number of commands and then
|
||||
disconnect the client. This ensures that we don't queue up an unbounded
|
||||
number of commands in memory and gives us a potential opportunity to
|
||||
number of commands in memory and gives us a potential oppurtunity to
|
||||
squawk loudly. When/if the client recovers it can reconnect to the
|
||||
server and ask for missed messages.
|
||||
|
||||
@@ -122,7 +122,7 @@ since these include tokens which can be used to restart the stream on
|
||||
connection errors.
|
||||
|
||||
The client should keep track of the token in the last RDATA command
|
||||
received for each stream so that on reconnection it can start streaming
|
||||
received for each stream so that on reconneciton it can start streaming
|
||||
from the correct place. Note: not all RDATA have valid tokens due to
|
||||
batching. See `RdataCommand` for more details.
|
||||
|
||||
@@ -188,8 +188,7 @@ client (C):
|
||||
Two positions are included, the "new" position and the last position sent respectively.
|
||||
This allows servers to tell instances that the positions have advanced but no
|
||||
data has been written, without clients needlessly checking to see if they
|
||||
have missed any updates. Instances will only fetch stuff if there is a gap between
|
||||
their current position and the given last position.
|
||||
have missed any updates.
|
||||
|
||||
#### ERROR (S, C)
|
||||
|
||||
|
||||
@@ -9,28 +9,222 @@ allows the homeserver to generate credentials that are valid for use on the
|
||||
TURN server through the use of a secret shared between the homeserver and the
|
||||
TURN server.
|
||||
|
||||
This documentation provides two TURN server configuration examples:
|
||||
|
||||
* [coturn](setup/turn/coturn.md)
|
||||
* [eturnal](setup/turn/eturnal.md)
|
||||
The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.
|
||||
|
||||
## Requirements
|
||||
|
||||
For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
|
||||
For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
|
||||
|
||||
Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
|
||||
However, even with appropriate configuration, NAT is known to cause issues and often does not work.
|
||||
|
||||
Afterwards, the homeserver needs some further configuration.
|
||||
## `coturn` setup
|
||||
|
||||
### Initial installation
|
||||
|
||||
The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
|
||||
|
||||
#### Debian installation
|
||||
|
||||
Just install the debian package:
|
||||
|
||||
```sh
|
||||
apt install coturn
|
||||
```
|
||||
|
||||
This will install and start a systemd service called `coturn`.
|
||||
|
||||
#### Source installation
|
||||
|
||||
1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
|
||||
|
||||
1. Configure it:
|
||||
|
||||
```sh
|
||||
./configure
|
||||
```
|
||||
|
||||
You may need to install `libevent2`: if so, you should do so in
|
||||
the way recommended by your operating system. You can ignore
|
||||
warnings about lack of database support: a database is unnecessary
|
||||
for this purpose.
|
||||
|
||||
1. Build and install it:
|
||||
|
||||
```sh
|
||||
make
|
||||
make install
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
|
||||
lines, with example values, are:
|
||||
|
||||
```
|
||||
use-auth-secret
|
||||
static-auth-secret=[your secret key here]
|
||||
realm=turn.myserver.org
|
||||
```
|
||||
|
||||
See `turnserver.conf` for explanations of the options. One way to generate
|
||||
the `static-auth-secret` is with `pwgen`:
|
||||
|
||||
```sh
|
||||
pwgen -s 64 1
|
||||
```
|
||||
|
||||
A `realm` must be specified, but its value is somewhat arbitrary. (It is
|
||||
sent to clients as part of the authentication flow.) It is conventional to
|
||||
set it to be your server name.
|
||||
|
||||
1. You will most likely want to configure coturn to write logs somewhere. The
|
||||
easiest way is normally to send them to the syslog:
|
||||
|
||||
```sh
|
||||
syslog
|
||||
```
|
||||
|
||||
(in which case, the logs will be available via `journalctl -u coturn` on a
|
||||
systemd system). Alternatively, coturn can be configured to write to a
|
||||
logfile - check the example config file supplied with coturn.
|
||||
|
||||
1. Consider your security settings. TURN lets users request a relay which will
|
||||
connect to arbitrary IP addresses and ports. The following configuration is
|
||||
suggested as a minimum starting point:
|
||||
|
||||
```
|
||||
# VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
|
||||
no-tcp-relay
|
||||
|
||||
# don't let the relay ever try to connect to private IP address ranges within your network (if any)
|
||||
# given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
|
||||
denied-peer-ip=10.0.0.0-10.255.255.255
|
||||
denied-peer-ip=192.168.0.0-192.168.255.255
|
||||
denied-peer-ip=172.16.0.0-172.31.255.255
|
||||
|
||||
# recommended additional local peers to block, to mitigate external access to internal services.
|
||||
# https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
|
||||
no-multicast-peers
|
||||
denied-peer-ip=0.0.0.0-0.255.255.255
|
||||
denied-peer-ip=100.64.0.0-100.127.255.255
|
||||
denied-peer-ip=127.0.0.0-127.255.255.255
|
||||
denied-peer-ip=169.254.0.0-169.254.255.255
|
||||
denied-peer-ip=192.0.0.0-192.0.0.255
|
||||
denied-peer-ip=192.0.2.0-192.0.2.255
|
||||
denied-peer-ip=192.88.99.0-192.88.99.255
|
||||
denied-peer-ip=198.18.0.0-198.19.255.255
|
||||
denied-peer-ip=198.51.100.0-198.51.100.255
|
||||
denied-peer-ip=203.0.113.0-203.0.113.255
|
||||
denied-peer-ip=240.0.0.0-255.255.255.255
|
||||
|
||||
# special case the turn server itself so that client->TURN->TURN->client flows work
|
||||
# this should be one of the turn server's listening IPs
|
||||
allowed-peer-ip=10.0.0.1
|
||||
|
||||
# consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
|
||||
user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
|
||||
total-quota=1200
|
||||
```
|
||||
|
||||
1. Also consider supporting TLS/DTLS. To do this, add the following settings
|
||||
to `turnserver.conf`:
|
||||
|
||||
```
|
||||
# TLS certificates, including intermediate certs.
|
||||
# For Let's Encrypt certificates, use `fullchain.pem` here.
|
||||
cert=/path/to/fullchain.pem
|
||||
|
||||
# TLS private key file
|
||||
pkey=/path/to/privkey.pem
|
||||
|
||||
# Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
|
||||
#no-tls
|
||||
#no-dtls
|
||||
```
|
||||
|
||||
In this case, replace the `turn:` schemes in the `turn_uris` settings below
|
||||
with `turns:`.
|
||||
|
||||
We recommend that you only try to set up TLS/DTLS once you have set up a
|
||||
basic installation and got it working.
|
||||
|
||||
NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
|
||||
not work with any Matrix client that uses Chromium's WebRTC library. This
|
||||
currently includes Element Android & iOS; for more details, see their
|
||||
[respective](https://github.com/vector-im/element-android/issues/1533)
|
||||
[issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
|
||||
[WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
|
||||
Consider using a ZeroSSL certificate for your TURN server as a working alternative.
|
||||
|
||||
1. Ensure your firewall allows traffic into the TURN server on the ports
|
||||
you've configured it to listen on (By default: 3478 and 5349 for TURN
|
||||
traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
|
||||
for the UDP relay.)
|
||||
|
||||
1. If your TURN server is behind NAT, the NAT gateway must have an external,
|
||||
publicly-reachable IP address. You must configure coturn to advertise that
|
||||
address to connecting clients:
|
||||
|
||||
```
|
||||
external-ip=EXTERNAL_NAT_IPv4_ADDRESS
|
||||
```
|
||||
|
||||
You may optionally limit the TURN server to listen only on the local
|
||||
address that is mapped by NAT to the external address:
|
||||
|
||||
```
|
||||
listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
|
||||
```
|
||||
|
||||
If your NAT gateway is reachable over both IPv4 and IPv6, you may
|
||||
configure coturn to advertise each available address:
|
||||
|
||||
```
|
||||
external-ip=EXTERNAL_NAT_IPv4_ADDRESS
|
||||
external-ip=EXTERNAL_NAT_IPv6_ADDRESS
|
||||
```
|
||||
|
||||
When advertising an external IPv6 address, ensure that the firewall and
|
||||
network settings of the system running your TURN server are configured to
|
||||
accept IPv6 traffic, and that the TURN server is listening on the local
|
||||
IPv6 address that is mapped by NAT to the external IPv6 address.
|
||||
|
||||
1. (Re)start the turn server:
|
||||
|
||||
* If you used the Debian package (or have set up a systemd unit yourself):
|
||||
```sh
|
||||
systemctl restart coturn
|
||||
```
|
||||
|
||||
* If you installed from source:
|
||||
|
||||
```sh
|
||||
bin/turnserver -o
|
||||
```
|
||||
|
||||
## Synapse setup
|
||||
|
||||
Your homeserver configuration file needs the following extra keys:
|
||||
|
||||
1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
|
||||
2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
|
||||
3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
|
||||
4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
|
||||
1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
|
||||
for your TURN server to be given out to your clients. Add separate
|
||||
entries for each transport your TURN server supports.
|
||||
2. "`turn_shared_secret`": This is the secret shared between your
|
||||
homeserver and your TURN server, so you should set it to the same
|
||||
string you used in turnserver.conf.
|
||||
3. "`turn_user_lifetime`": This is the amount of time credentials
|
||||
generated by your homeserver are valid for (in milliseconds).
|
||||
Shorter times offer less potential for abuse at the expense of
|
||||
increased traffic between web clients and your homeserver to
|
||||
refresh credentials. The TURN REST API specification recommends
|
||||
one day (86400000).
|
||||
4. "`turn_allow_guests`": Whether to allow guest users to use the
|
||||
TURN server. This is enabled by default, as otherwise VoIP will
|
||||
not work reliably for guests. However, it does introduce a
|
||||
security risk as it lets guests connect to arbitrary endpoints
|
||||
without having gone through a CAPTCHA or similar to register a
|
||||
real account.
|
||||
|
||||
As an example, here is the relevant section of the config file for `matrix.org`. The
|
||||
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
|
||||
@@ -38,7 +232,7 @@ As an example, here is the relevant section of the config file for `matrix.org`.
|
||||
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
|
||||
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
|
||||
turn_user_lifetime: 86400000
|
||||
turn_allow_guests: true
|
||||
turn_allow_guests: True
|
||||
|
||||
After updating the homeserver configuration, you must restart synapse:
|
||||
|
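How you restart depends on how Synapse was installed; as a rough sketch (service and script names may differ on your system):

```sh
# Debian packages or other systemd-based installations (unit name may vary):
sudo systemctl restart matrix-synapse.service

# Installations managed with synctl (e.g. a virtualenv install):
synctl restart
```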
||||
@@ -69,7 +263,7 @@ Here are a few things to try:
|
||||
* Check that you have opened your firewall to allow UDP traffic to the UDP
|
||||
relay ports (49152-65535 by default).
|
||||
|
||||
* Try disabling TLS/DTLS listeners and enable only its (unencrypted)
|
||||
* Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
|
||||
TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
|
||||
voice & video WebRTC traffic is always encrypted.)
|
||||
|
||||
@@ -94,19 +288,12 @@ Here are a few things to try:
|
||||
|
||||
* ensure that your TURN server uses the NAT gateway as its default route.
|
||||
|
||||
* Enable more verbose logging, in `coturn` via the `verbose` setting:
|
||||
* Enable more verbose logging in coturn via the `verbose` setting:
|
||||
|
||||
```
|
||||
verbose
|
||||
```
|
||||
|
||||
or, for `eturnal`, with the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for it to become effective):
|
||||
|
||||
```yaml
|
||||
## Logging configuration:
|
||||
log_level: debug
|
||||
```
|
||||
|
||||
... and then see if there are any clues in its logs.
|
||||
|
||||
* If you are using a browser-based client under Chrome, check
|
||||
@@ -130,7 +317,7 @@ Here are a few things to try:
|
||||
matrix client to your homeserver in your browser's network inspector. In
|
||||
the response you should see `username` and `password`. Or:
|
||||
|
||||
* Use the following shell commands for `coturn`:
|
||||
* Use the following shell commands:
|
||||
|
||||
```sh
|
||||
secret=staticAuthSecretHere
|
||||
@@ -140,16 +327,11 @@ Here are a few things to try:
|
||||
echo -e "username: $u\npassword: $p"
|
||||
```
|
||||
|
||||
or for `eturnal`
|
||||
Or:
|
||||
|
||||
```sh
|
||||
eturnalctl credentials
|
||||
```
|
||||
|
||||
|
||||
* Or (**coturn only**): Temporarily configure `coturn` to accept a static
|
||||
username/password. To do this, comment out `use-auth-secret` and
|
||||
`static-auth-secret` and add the following:
|
||||
* Temporarily configure coturn to accept a static username/password. To do
|
||||
this, comment out `use-auth-secret` and `static-auth-secret` and add the
|
||||
following:
|
||||
|
||||
```
|
||||
lt-cred-mech
|
||||
|
||||
204
docs/upgrade.md
204
docs/upgrade.md
@@ -88,204 +88,6 @@ process, for example:
|
||||
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
```
|
||||
|
||||
# Upgrading to v1.81.0
|
||||
|
||||
## Application service path & authentication deprecations
|
||||
|
||||
Synapse now attempts the versioned appservice paths before falling back to the
|
||||
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
|
||||
Usage of the legacy routes should be considered deprecated.
|
||||
|
||||
Additionally, Synapse has supported sending the application service access token
|
||||
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
|
||||
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
|
||||
query parameter. This is insecure and should be considered deprecated.
|
||||
|
||||
A future version of Synapse (v1.88.0 or later) will remove support for legacy
|
||||
application service routes and query parameter authorization.
|
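For illustration only (placeholders, not an excerpt from this document), a request using the preferred header-based authentication on a versioned path looks roughly like:

```sh
# Sketch of Synapse calling an application service; the legacy
# ?access_token=... query parameter form is deprecated.
curl -X PUT \
  -H "Authorization: Bearer AS_TOKEN_PLACEHOLDER" \
  -H "Content-Type: application/json" \
  -d '{"events": []}' \
  "https://appservice.example.com/_matrix/app/v1/transactions/42"
```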
||||
|
||||
# Upgrading to v1.80.0
|
||||
|
||||
## Reporting events error code change
|
||||
|
||||
Before this update, the
|
||||
[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid)
|
||||
endpoint would return a `403` if a user attempted to report an event that they did not have access to.
|
||||
This endpoint will now return a `404` in this case instead.
|
||||
|
||||
Clients that implement event reporting should check that their error handling code will handle this
|
||||
change.
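For example (a sketch, not from the original docs; the homeserver URL, room ID, event ID and access token are placeholders), a check that tolerates both the old and new status codes might look like:

```sh
# Report an event and print only the HTTP status code; placeholders must be
# replaced with real values (and URL-encoded where necessary).
status=$(curl -s -o /dev/null -w '%{http_code}' -X POST \
  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  -H "Content-Type: application/json" \
  -d '{"reason": "spam", "score": -100}' \
  "https://matrix.example.com/_matrix/client/v3/rooms/${ROOM_ID}/report/${EVENT_ID}")

case "$status" in
  200)     echo "report accepted" ;;
  403|404) echo "event not accessible (403 before Synapse v1.80.0, 404 afterwards)" ;;
  *)       echo "unexpected status: $status" ;;
esac
```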
# Upgrading to v1.79.0

## The `on_threepid_bind` module callback method has been deprecated

Synapse v1.79.0 deprecates the
[`on_threepid_bind`](modules/third_party_rules_callbacks.md#on_threepid_bind)
"third-party rules" Synapse module callback method in favour of a new module method,
[`on_add_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_add_user_third_party_identifier).
`on_threepid_bind` will be removed in a future version of Synapse. You should check whether any Synapse
modules in use in your deployment are making use of `on_threepid_bind`, and update them where possible.

The arguments and functionality of the new method are the same.

The justification behind the name change is that the old method's name, `on_threepid_bind`, was
misleading. A user is considered to "bind" their third-party ID to their Matrix ID only if they
do so via an [identity server](https://spec.matrix.org/latest/identity-service-api/)
(so that users on other homeservers may find them). But this method was not called in that case -
it was only called when a user added a third-party identifier on the local homeserver.

Module developers may also be interested in the related
[`on_remove_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_remove_user_third_party_identifier)
module callback method that was also added in Synapse v1.79.0. This new method is called when a
user removes a third-party identifier from their account.

# Upgrading to v1.78.0

## Deprecate the `/_synapse/admin/v1/media/<server_name>/delete` admin API

Synapse 1.78.0 replaces the `/_synapse/admin/v1/media/<server_name>/delete`
admin API with an identical endpoint at `/_synapse/admin/v1/media/delete`. Please
update your tooling to use the new endpoint. The deprecated version will be removed
in a future release.

# Upgrading to v1.76.0

## Faster joins are enabled by default

When joining a room for the first time, Synapse 1.76.0 will request a partial join from the other server by default. Previously, server admins had to opt-in to this using an experimental config flag.

Server admins can opt out of this feature for the time being by setting

```yaml
experimental:
    faster_joins: false
```

in their server config.

## Changes to the account data replication streams

Synapse has changed the format of the account data and devices replication
streams (between workers). This is a forwards- and backwards-incompatible
change: v1.75 workers cannot process account data replicated by v1.76 workers,
and vice versa.

Once all workers are upgraded to v1.76 (or downgraded to v1.75), account data
and device replication will resume as normal.

## Minimum version of Poetry is now 1.3.2

The minimum supported version of Poetry is now 1.3.2 (previously 1.2.0, [since
Synapse 1.67](#upgrading-to-v1670)). If you have used `poetry install` to
install Synapse from a source checkout, you should upgrade poetry: see its
[installation instructions](https://python-poetry.org/docs/#installation).
For all other installation methods, no action is required.
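As a hedged example (only applicable if Poetry was installed with its official installer; other installation methods have their own upgrade paths, as described in the instructions linked above):

```sh
# Upgrade a Poetry installation made with the official installer,
# then confirm the version is at least 1.3.2.
poetry self update
poetry --version
```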
# Upgrading to v1.74.0

## Unicode support in user search

This version introduces optional support for an [improved user search dealing with Unicode characters](https://github.com/matrix-org/synapse/pull/14464).

If you want to take advantage of this feature you need to install PyICU,
the ICU native dependency and its development headers
so that PyICU can build since no prebuilt wheels are available.

You can follow [the PyICU documentation](https://pypi.org/project/PyICU/) to do so,
and then do `pip install matrix-synapse[user-search]` for a PyPI install.
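For instance — a sketch assuming a Debian/Ubuntu host; package names differ on other distributions:

```sh
# Install the ICU library, its development headers and pkg-config
# (Debian/Ubuntu package names), then install Synapse with the optional
# user-search extra so that PyICU can be built from source.
sudo apt-get install -y libicu-dev pkg-config
pip install "matrix-synapse[user-search]"
```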
Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.

## User directory rebuild

Synapse 1.74 queues a background update
[to rebuild the user directory](https://github.com/matrix-org/synapse/pull/14643),
in order to fix missing or erroneous entries.

When this update begins, the user directory will be cleared out and rebuilt from
scratch. User directory lookups will be incomplete until the rebuild completes.
Admins can monitor the rebuild's progress by using the
[Background update Admin API](usage/administration/admin_api/background_updates.md#status).
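For example (a sketch; the admin access token and the address of Synapse's client listener are placeholders), the status endpoint can be polled while the rebuild runs:

```sh
# Query the background updates status endpoint of the Admin API.
curl -s -H "Authorization: Bearer ${ADMIN_ACCESS_TOKEN}" \
  "http://localhost:8008/_synapse/admin/v1/background_updates/status"
```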
# Upgrading to v1.73.0

## Legacy Prometheus metric names have now been removed

Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
and offered an option to disable them.
Synapse v1.71.0 disabled legacy Prometheus metric names by default.

This version, v1.73.0, removes those legacy Prometheus metric names entirely.
This also means that the `enable_legacy_metrics` configuration option has been
removed; it will no longer be possible to re-enable the legacy metric names.

If you use metrics and have not yet updated your Grafana dashboard(s),
Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
to this version.
Note that the included Grafana dashboard was updated in v1.72.0 to correct some
metric names which were missed when legacy metrics were disabled by default.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
for more context.

# Upgrading to v1.72.0

## Dropping support for PostgreSQL 10

In line with our [deprecation policy](deprecation_policy.md), we've dropped
support for PostgreSQL 10, as it is no longer supported upstream.

This release of Synapse requires PostgreSQL 11+.
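As a quick sanity check before upgrading (a sketch; the database name and connection options are placeholders for your own settings), you can confirm the server version of the database Synapse uses:

```sh
# Print the PostgreSQL server version for the Synapse database.
psql -d synapse -c "SHOW server_version;"
```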
# Upgrading to v1.71.0

## Removal of the `generate_short_term_login_token` module API method

As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.

Modules relying on it can instead use the `create_login_token` method.

## Changes to the events received by application services (interest)

To align with spec (changed in
[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
only considers local users to be interesting. In other words, the `users` namespace
regex is only applied against local users of the homeserver.
Please note, this probably doesn't affect the expected behavior of your application
service, since an interesting local user in a room still means all messages in the room
(from local or remote users) will still be considered interesting. And matching a room
with the `rooms` or `aliases` namespace regex will still consider all events sent in the
room to be interesting to the application service.

If one of your application service's `users` regex was intending to match a remote user,
this will no longer match as you expect. The behavioral mismatch between matching all
local users and some remote users is why the spec was changed/clarified and this
caveat is no longer supported.

## Legacy Prometheus metric names are now disabled by default

Synapse v1.71.0 disables legacy Prometheus metric names by default.
For administrators that still rely on them and have not yet had a chance to update their
uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.

Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
it will no longer be possible to re-enable them.

If you do not use metrics or you have already updated your Grafana dashboard(s),
Prometheus console(s) and alerting rule(s), there is no action needed.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).

# Upgrading to v1.69.0

## Changes to the receipts replication streams

@@ -995,8 +797,8 @@ Any scripts still using the above APIs should be converted to use the

## User-interactive authentication fallback templates can now display errors

This may affect you if you make use of custom HTML templates for the
[reCAPTCHA (`synapse/res/templates/recaptcha.html`)](https://github.com/matrix-org/synapse/tree/develop/synapse/res/templates/recaptcha.html) or
[terms (`synapse/res/templates/terms.html`)](https://github.com/matrix-org/synapse/tree/develop/synapse/res/templates/terms.html) fallback pages.
[reCAPTCHA](../synapse/res/templates/recaptcha.html) or
[terms](../synapse/res/templates/terms.html) fallback pages.

The template is now provided an `error` variable if the authentication
process failed. See the default templates linked above for an example.

@@ -1594,7 +1396,7 @@ New templates (`sso_auth_confirm.html`, `sso_auth_success.html`, and

is configured to use SSO and a custom
`sso_redirect_confirm_template_dir` configuration then these templates
will need to be copied from
[`synapse/res/templates`](https://github.com/matrix-org/synapse/tree/develop/synapse/res/templates) into that directory.
[synapse/res/templates](synapse/res/templates) into that directory.
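For example (a sketch; the source checkout location and the configured template directory are placeholders), copying the new SSO templates might look like:

```sh
# Copy the new SSO templates from a Synapse source checkout into the directory
# configured as sso_redirect_confirm_template_dir (both paths are placeholders).
cp /path/to/synapse/synapse/res/templates/sso_*.html /path/to/custom_templates/
```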
## Synapse SSO Plugins Method Deprecation

@@ -7,7 +7,7 @@ server admin. (Note that a server admin is distinct from a room admin.)

An existing user can be marked as a server admin by updating the database directly.

Check your [database settings](../../configuration/config_documentation.md#database) in the configuration file, connect to the correct database using either `psql [database name]` (if using PostgreSQL) or `sqlite3 path/to/your/database.db` (if using SQLite) and elevate the user `@foo:bar.com` to administrator.
Check your [database settings](config_documentation.md#database) in the configuration file, connect to the correct database using either `psql [database name]` (if using PostgreSQL) or `sqlite3 path/to/your/database.db` (if using SQLite) and elevate the user `@foo:bar.com` to administrator.

```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
```
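For instance — a sketch assuming a PostgreSQL database named `synapse`; adjust the database name, or use the `sqlite3` form, to match your settings:

```sh
# Run the promotion statement directly against a PostgreSQL database named "synapse".
psql synapse -c "UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';"

# Equivalent for an SQLite deployment (database path is a placeholder).
sqlite3 /path/to/your/database.db "UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';"
```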
@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.

Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.

## Making an Admin API request

For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.

@@ -32,10 +32,10 @@ curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_reque
```

For example, suppose we want to
[query the account](../../../admin_api/user_admin_api.md#query-user-account) of the user
[query the account](user_admin_api.md#query-user-account) of the user
`@foo:bar.com`. We need an admin access token (e.g.
`syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk`), and we need to know which port
Synapse's [`client` listener](../../configuration/config_documentation.md#listeners) is listening
Synapse's [`client` listener](config_documentation.md#listeners) is listening
on (e.g. `8008`). Then we can use the following command to request the account
information from the Admin API.
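The command itself is cut off by the truncated diff; a sketch along those lines (assuming the query-account endpoint at `/_synapse/admin/v2/users/<user_id>` and the example values above) would be:

```sh
# Query the account of @foo:bar.com via the Admin API on the local client listener.
curl --header "Authorization: Bearer syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk" \
  "http://localhost:8008/_synapse/admin/v2/users/@foo:bar.com"
```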
Some files were not shown because too many files have changed in this diff.