Compare commits

11 Commits

Author | SHA1 | Message | Date

Andrew Morgan | 6dc4fda9c7 | Remove entries from 'account_data_undelivered_deletes' when account data is added/modified | 2022-12-19 16:44:24 +00:00
  This table should only be tracking deleted account data entries.
Andrew Morgan | c8721fd25b | Populate 'account_data_undelivered_deletes' table on delete | 2022-12-19 16:44:24 +00:00
Andrew Morgan | fdf9e2f9d6 | Add migration for table account_data_undelivered_deletes | 2022-12-19 16:44:24 +00:00
Andrew Morgan | 8e35bfc889 | Prevent deleted account data items appearing in initial syncs | 2022-12-19 16:44:24 +00:00
Andrew Morgan | 47fe40b7ca | Return a 404 if an account data type is empty | 2022-12-19 16:44:24 +00:00
Andrew Morgan | 5c7d2e05e8 | Allow deleting account data by PUT'ing with empty content | 2022-12-19 16:44:24 +00:00
  MSC3391 specifies that for backwards compatibility purposes, setting an account data type's content to {} should be equivalent to deleting that account data. That call should succeed regardless of whether the account data existed previously or not.
Andrew Morgan | f112fd6c12 | Add servlets, handler, storage functions for deleting user/room account data | 2022-12-19 16:44:24 +00:00
Andrew Morgan | 8c255cbb1d | Set 'experimental_features.msc3391_enabled' config to true for complement tests | 2022-12-19 16:44:24 +00:00
Andrew Morgan | f66d743eae | Add replication methods for removing account data | 2022-12-19 16:44:24 +00:00
  Also rename existing account data methods, explicitly stating that they're for adding account data.
Andrew Morgan | 16cccade57 | Enable Complement tests for MSC3391 | 2022-12-19 16:44:24 +00:00
Andrew Morgan | c5765a480f | Add experimental features flag | 2022-12-19 16:44:24 +00:00
690 changed files with 14336 additions and 32850 deletions
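For context on commit 5c7d2e05e8 above: the two request shapes that MSC3391 treats as equivalent look roughly like this from a client. This is a hand-written sketch, not code from the branch; the homeserver URL, account data type, access token and the `org.matrix.msc3391` unstable prefix are all assumptions based on the MSC.

```python
# Sketch only: under MSC3391, both calls below should delete the entry.
# Every concrete value here (URL, user ID, data type, token, unstable
# prefix) is an assumption for illustration, not taken from this diff.
import requests

HS = "https://example.com"
HEADERS = {"Authorization": "Bearer <access_token>"}
SUFFIX = "user/@alice:example.com/account_data/im.example.setting"

# 1. The explicit delete endpoint the MSC adds (unstable prefix assumed):
requests.delete(
    f"{HS}/_matrix/client/unstable/org.matrix.msc3391/{SUFFIX}",
    headers=HEADERS,
)

# 2. The backwards-compatible form: PUT the existing endpoint with empty
#    content. Per the commit message, this succeeds whether or not the
#    account data existed previously.
requests.put(f"{HS}/_matrix/client/v3/{SUFFIX}", headers=HEADERS, json={})
```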


@@ -50,16 +50,7 @@ def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
check_is_abi3_compatible(wheel_file)
# HACK: it seems that some older versions of pip will consider a wheel marked
# as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
# here; there are some clues in
# https://github.com/pantsbuild/pants/pull/12857
# https://github.com/pypa/pip/issues/9138
# https://github.com/pypa/packaging/pull/319
# Empirically this seems to work, note that macOS 11 and 10.16 are the same,
# both versions are valid for backwards compatibility.
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
abi3_tag = Tag(tag.interpreter, "abi3", platform)
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
dirname = os.path.dirname(wheel_file)
new_wheel_file = os.path.join(
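The comment above describes a pure tag rename. As a standalone illustration of what that rename does, here is a minimal sketch using `packaging.tags.Tag` (the same class the script manipulates); the concrete wheel tag is made up:

```python
# Minimal sketch of the macosx_11_0 -> macosx_10_16 rename; the input tag
# is hypothetical, chosen only to show the substitution.
from packaging.tags import Tag

tag = Tag("cp37", "abi3", "macosx_11_0_x86_64")
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
print(Tag(tag.interpreter, "abi3", platform))  # cp37-abi3-macosx_10_16_x86_64
```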


@@ -109,26 +109,11 @@ sytest_tests = [
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
},
]
if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "focal",
"reactor": "asyncio",
},
{
"sytest-tag": "focal",
"postgres": "postgres",
"reactor": "asyncio",
},
{
"sytest-tag": "testing",
"postgres": "postgres",


@@ -1,23 +0,0 @@
#! /usr/bin/env python
import sys

if sys.version_info < (3, 11):
    raise RuntimeError("Requires at least Python 3.11, to import tomllib")

import tomllib

with open("poetry.lock", "rb") as f:
    lockfile = tomllib.load(f)

try:
    lock_version = lockfile["metadata"]["lock-version"]
    assert lock_version == "2.0"
except Exception:
    print(
        """\
Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
https://matrix-org.github.io/synapse/develop/development/dependencies.html
"""
    )
    raise
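The deleted script deliberately hard-requires Python 3.11 for `tomllib`. For comparison, the same check could tolerate older interpreters via the `tomli` backport (which another workflow in this diff installs with `pip install tomli`); this variant is a sketch, not what the script did:

```python
# Variant sketch: the same lock-version check, falling back to the tomli
# backport on pre-3.11 interpreters. Not the deleted script itself.
try:
    import tomllib  # Python 3.11+
except ModuleNotFoundError:
    import tomli as tomllib  # `pip install tomli` on older Pythons

with open("poetry.lock", "rb") as f:
    lockfile = tomllib.load(f)
assert lockfile["metadata"]["lock-version"] == "2.0", "re-run `poetry lock --no-update`"
```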


@@ -31,6 +31,34 @@ sed -i \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install toml wheel
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pip install poetry==1.2.0
poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"


@@ -9,6 +9,16 @@ set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
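The `block`/`endblock` aliases above are thin wrappers around GitHub Actions' log-grouping workflow commands: anything printed between `::group::<title>` and `::endgroup::` is folded in the job log. The same trick works from any language, e.g. (sketch):

```python
# Fold a noisy step in a GitHub Actions job log using the documented
# ::group:: / ::endgroup:: workflow commands. The command run is arbitrary.
import subprocess

print("::group::Install Complement Dependencies", flush=True)
subprocess.run(["go", "version"], check=True)  # noisy work goes here
print("::endgroup::", flush=True)
```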


@@ -23,9 +23,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-dat
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
dir_r="/tmp/export_data/rooms"
dir_u="/tmp/export_data/user_data"
if [ -d "$dir_r" ] && [ -d "$dir_u" ]; then
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
@@ -44,9 +43,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-d
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
dir_r2="/tmp/export_data2/rooms"
dir_u2="/tmp/export_data2/user_data"
if [ -d "$dir_r2" ] && [ -d "$dir_u2" ]; then
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."

.flake8 (new file, 18 lines)

@@ -0,0 +1,18 @@
# TODO: incorporate this into pyproject.toml if flake8 supports it in the future.
# See https://github.com/PyCQA/flake8/issues/234
[flake8]
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
# for error codes. The ones we ignore are:
# W503: line break before binary operator
# W504: line break after binary operator
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
#
# flake8-bugbear runs extra checks. Its error codes are described at
# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
# B023: Functions defined inside a loop must not use variables redefined in the loop
# B024: Abstract base class with no abstract method.
ignore=W503,W504,E203,E731,E501,B019,B023,B024


@@ -21,8 +21,4 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
c4268e3da64f1abb5b31deaeb5769adb6510c0a7
# Update black to 23.1.0 (#15103)
9bb2eac71962970d02842bca441f4bcdbbf93a11
c4268e3da64f1abb5b31deaeb5769adb6510c0a7


@@ -129,7 +129,7 @@ body:
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output as text (not images), ideally at INFO or DEBUG log level.
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks (`\``).
Please be careful to remove any personal or private data.


@@ -0,0 +1,46 @@
name: Write changelog for dependabot PR
on:
  pull_request:
    types:
      - opened
      - reopened # For debugging!

permissions:
  # Needed to be able to push the commit. See
  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
  # for a similar example
  contents: write

jobs:
  add-changelog:
    runs-on: 'ubuntu-latest'
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.ref }}
      - name: Write, commit and push changelog
        run: |
          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
          git add changelog.d
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "GitHub Actions"
          git commit -m "Changelog"
          git push
        shell: bash
# The `git push` above does not trigger CI on the dependabot PR.
#
# By default, workflows can't trigger other workflows when they're just using the
# default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
# recursive workflow loops by accident, because that'll get very expensive very
# quickly.) Instead, you have to manually call out to another workflow, or else
# make your changes (i.e. the `git push` above) using a personal access token.
# See
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
#
# I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
# See git commit history for previous attempts. If anyone desperately wants to try
# again in the future, make a matrix-bot account and use its access token to git push.
# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.
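Stripped of the git plumbing, the workflow's `run` step just writes the PR title into a `changelog.d` file named after the PR number; in plain Python terms (the concrete title and number are illustrative, borrowed from a dependabot changelog entry later in this diff):

```python
# What the run step above does, minus the git add/commit/push: write
# "<PR title>." into changelog.d/<PR number>.misc. Values illustrative.
from pathlib import Path

pr_title = "Bump serde from 1.0.150 to 1.0.151"  # github.event.pull_request.title
pr_number = 14697                                # github.event.pull_request.number

Path("changelog.d").mkdir(exist_ok=True)
Path(f"changelog.d/{pr_number}.misc").write_text(f"{pr_title}.\n")
```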


@@ -10,7 +10,6 @@ on:
permissions:
contents: read
packages: write
jobs:
build:
@@ -35,20 +34,11 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
images: |
docker.io/matrixdotorg/synapse
ghcr.io/matrix-org/synapse
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |
@@ -58,7 +48,7 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v4
uses: docker/build-push-action@v3
with:
push: true
labels: "gitsha1=${{ github.sha }}"


@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
@@ -22,7 +22,7 @@ jobs:
path: book
- name: 📤 Deploy to Netlify
uses: matrix-org/netlify-pr-preview@v2
uses: matrix-org/netlify-pr-preview@v1
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}


@@ -4,15 +4,13 @@ on:
pull_request:
paths:
- docs/**
- book.toml
- .github/workflows/docs-pr.yaml
jobs:
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
@@ -34,27 +32,3 @@ jobs:
path: book
# We'll only use this in a workflow_run, then we're done with it
retention-days: 1
link-check:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Setup htmltest
run: |
wget https://github.com/wjdp/htmltest/releases/download/v0.17.0/htmltest_0.17.0_linux_amd64.tar.gz
echo '775c597ee74899d6002cd2d93076f897f4ba68686bceabe2e5d72e84c57bc0fb htmltest_0.17.0_linux_amd64.tar.gz' | sha256sum -c
tar zxf htmltest_0.17.0_linux_amd64.tar.gz
- name: Test links with htmltest
# Build the book with `./` as the site URL (to make checks on 404.html possible)
# Then run htmltest (without checking external links since that involves the network and is slow).
run: |
MDBOOK_OUTPUT__HTML__SITE_URL="./" mdbook build
./htmltest book --skip-external
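The `sha256sum -c` line pins the htmltest tarball to a known digest before it is unpacked. The equivalent check in Python, with the digest copied from the workflow above:

```python
# Verify a downloaded artifact against a pinned SHA-256 before trusting it.
import hashlib

EXPECTED = "775c597ee74899d6002cd2d93076f897f4ba68686bceabe2e5d72e84c57bc0fb"
with open("htmltest_0.17.0_linux_amd64.tar.gz", "rb") as f:
    actual = hashlib.sha256(f.read()).hexdigest()
if actual != EXPECTED:
    raise SystemExit(f"checksum mismatch: {actual}")
```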


@@ -13,10 +13,25 @@ on:
workflow_dispatch:
jobs:
pre:
name: Calculate variables for GitHub Pages deployment
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
# Figure out the target directory.
#
# The target directory depends on the name of the branch
@@ -40,65 +55,11 @@ jobs:
# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
outputs:
branch-version: ${{ steps.vars.outputs.branch-version }}
################################################################################
pages-docs:
name: GitHub Pages
runs-on: ubuntu-latest
needs:
- pre
steps:
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ needs.pre.outputs.branch-version }}
################################################################################
pages-devdocs:
name: GitHub Pages (developer docs)
runs-on: ubuntu-latest
needs:
- pre
steps:
- uses: actions/checkout@v3
- name: "Set up Sphinx"
uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
groups: "dev-docs"
extras: ""
- name: Build the documentation
run: |
cd dev-docs
poetry run make html
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./dev-docs/_build/html
destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
destination_dir: ./${{ steps.vars.outputs.branch-version }}
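The "Figure out the target directory" step, whose body is elided from the hunk above, derives a `branch-version` string from the pushed git ref. A hypothetical sketch of that kind of mapping; the real rules live in the hidden lines and may well differ:

```python
# Hypothetical reconstruction of mapping a git ref to a per-branch docs
# directory; the actual logic is in the lines elided from this diff.
def branch_version(ref: str) -> str:
    branch = ref.removeprefix("refs/heads/").removeprefix("refs/tags/")
    # e.g. "release-v1.85" -> "v1.85"; "develop" stays "develop"
    return branch.removeprefix("release-")

assert branch_version("refs/heads/release-v1.85") == "v1.85"
assert branch_version("refs/heads/develop") == "develop"
```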


@@ -27,7 +27,9 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -35,7 +37,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
poetry-version: "1.2.0"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -59,7 +61,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
@@ -130,7 +134,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
@@ -178,8 +184,6 @@ jobs:
with:
path: synapse
- uses: actions/setup-go@v4
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -204,7 +208,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
- uses: JasonEtco/create-an-issue@3a8ba796516b57db8cb2ee6dfc65bc76cd39d56d # v2.8.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:


@@ -1,24 +0,0 @@
on:
  push:
    branches: ["develop", "release-*"]
    paths:
      - poetry.lock
  pull_request:
    paths:
      - poetry.lock

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check-sdists:
    name: "Check locked dependencies have sdists"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.x'
      - run: pip install tomli
      - run: ./scripts-dev/check_locked_deps_have_sdists.py


@@ -48,7 +48,7 @@ jobs:
with:
ref: master
- name: Login to registry
uses: docker/login-action@v2
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}


@@ -4,15 +4,13 @@ name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs). PRs skip
# building wheels on macOS & ARM.
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
# we do the full build on tags.
tags: ["v*"]
merge_group:
workflow_dispatch:
concurrency:
@@ -129,7 +127,7 @@ jobs:
python-version: "3.x"
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
@@ -150,7 +148,7 @@ jobs:
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp3*-* *i686* *musl*
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
# Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI


@@ -4,7 +4,6 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
merge_group:
workflow_dispatch:
concurrency:
@@ -34,13 +33,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
- uses: actions/setup-python@v4
with:
python-version: "3.x"
poetry-version: "1.3.2"
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
@@ -55,70 +52,10 @@ jobs:
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
check-lockfile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: .ci/scripts/check_lockfile.py
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
with:
install-project: "false"
- name: Import order (isort)
run: poetry run isort --check --diff .
- name: Code style (black)
run: poetry run black --check --diff .
- name: Semantic checks (ruff)
# --quiet suppresses the update check.
run: poetry run ruff --quiet .
lint-mypy:
runs-on: ubuntu-latest
name: Typechecking
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
with:
# We want to make use of type hints in optional dependencies too.
extras: all
# We have seen odd mypy failures that were resolved when we started
# installing the project again:
# https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
# To make CI green, err towards caution and install the project.
install-project: "true"
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
uses: actions/cache@v3
with:
path: |
.mypy_cache
key: mypy-cache-${{ github.context.sha }}
restore-keys: mypy-cache-
- name: Run mypy
run: poetry run mypy
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
lint-crlf:
runs-on: ubuntu-latest
@@ -149,12 +86,8 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
@@ -167,8 +100,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -185,7 +122,10 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: nightly-2022-12-01
components: clippy
@@ -202,10 +142,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
# We use nightly so that it correctly groups together imports
toolchain: nightly-2022-12-01
toolchain: 1.58.1
components: rustfmt
- uses: Swatinem/rust-cache@v2
@@ -216,13 +158,11 @@ jobs:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-mypy
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- check-lockfile
- lint-clippy
- lint-rustfmt
runs-on: ubuntu-latest
@@ -268,13 +208,17 @@ jobs:
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
poetry-version: "1.3.2"
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
@@ -308,39 +252,51 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v4
with:
python-version: '3.7'
# Calculating the old-deps actually takes a bunch of time, so we cache the
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
# otherwise the `poetry install` step will error due to the poetry.lock
# file being outdated.
#
# This caches the output of `Prepare old deps`, which should generate the
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- uses: actions/cache@v3
id: cache-poetry-old-deps
name: Cache poetry.lock
with:
path: |
poetry.lock
pyproject.toml
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
# Note: we install using `pip` here, not poetry. `poetry install` ignores the
# build-system section (https://github.com/python-poetry/poetry/issues/6154), but
# we explicitly want to test that you can `pip install` using the oldest version
# of poetry-core and setuptools-rust.
- run: pip install .[all,test]
# We only now install poetry so that `setup-python-poetry` caches the
# right poetry.lock's dependencies.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
extras: "all test"
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
# don't do this then python can't find the native lib.
- run: rm -rf synapse/
# Sanity check we can import/run Synapse
- run: python -m synapse.app.homeserver --help
- run: python -m twisted.trial -j6 tests
- run: poetry run trial -j6 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -372,7 +328,6 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.3.2"
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
@@ -400,7 +355,6 @@ jobs:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
@@ -416,7 +370,12 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -460,7 +419,6 @@ jobs:
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
@@ -512,7 +470,6 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.3.2"
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
@@ -556,21 +513,21 @@ jobs:
path: synapse
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: actions/setup-go@v4
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
name: Run Complement Tests
cargo-test:
@@ -584,31 +541,16 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- run: cargo test
# We want to ensure that the cargo benchmarks still compile, which requires a
# nightly compiler.
cargo-bench:
if: ${{ needs.changes.outputs.rust == 'true' }}
runs-on: ubuntu-latest
needs:
- linting-done
- changes
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2
- run: cargo bench --no-run
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
@@ -620,7 +562,6 @@ jobs:
- portdb
- complement
- cargo-test
- cargo-bench
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
@@ -632,4 +573,3 @@ jobs:
skippable: |
lint-newsfile
cargo-test
cargo-bench


@@ -6,7 +6,7 @@ on:
jobs:
triage:
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2
uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}


@@ -5,13 +5,6 @@ on:
- cron: 0 8 * * *
workflow_dispatch:
inputs:
twisted_ref:
description: Commit, branch or tag to checkout from upstream Twisted.
required: false
default: 'trunk'
type: string
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +18,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -34,7 +29,7 @@ jobs:
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
@@ -48,7 +43,9 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -85,7 +82,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
@@ -141,8 +140,6 @@ jobs:
with:
path: synapse
- uses: actions/setup-go@v4
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -151,7 +148,7 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.3.2
pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
@@ -177,7 +174,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
- uses: JasonEtco/create-an-issue@3a8ba796516b57db8cb2ee6dfc65bc76cd39d56d # v2.8.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

.gitignore (12 lines changed)

@@ -15,10 +15,9 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want poetry, cargo and flake lockfiles.
# We do want the poetry and cargo lockfile.
!poetry.lock
!Cargo.lock
!flake.lock
# stuff that is likely to exist when you run a server locally
/*.db
@@ -39,9 +38,6 @@ __pycache__/
/.envrc
.direnv/
# For nix/devenv users
.devenv/
# IDEs
/.idea/
/.ropeproject/
@@ -57,7 +53,6 @@ __pycache__/
/coverage.*
/dist/
/docs/build/
/dev-docs/_build/
/htmlcov
/pip-wheel-metadata/
@@ -66,7 +61,7 @@ book/
# complement
/complement-*
/main.tar.gz
/master.tar.gz
# rust
/target/
@@ -74,6 +69,3 @@ book/
# Poetry will create a setup.py, which we don't want to include.
/setup.py
# Don't include users' poetry configs
/poetry.toml

CHANGES.md (1124 lines changed): diff suppressed because it is too large.

Cargo.lock (generated; 60 lines changed)

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.71"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
[[package]]
name = "arc-swap"
@@ -132,9 +132,12 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.18"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
@@ -182,9 +185,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.52"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
@@ -229,9 +232,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.8.1"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
dependencies = [
"arc-swap",
"log",
@@ -247,7 +250,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 1.0.104",
"syn",
]
[[package]]
@@ -258,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.104",
"syn",
]
[[package]]
@@ -273,9 +276,9 @@ dependencies = [
[[package]]
name = "quote"
version = "1.0.26"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
@@ -291,9 +294,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.7.3"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
@@ -302,9 +305,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.6.29"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
@@ -320,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.163"
version = "1.0.151"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2"
checksum = "97fed41fc1a24994d044e6db6935e69511a1153b52c15eb42493b26fa87feba0"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.163"
version = "1.0.151"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
checksum = "255abe9a125a985c05190d687b320c12f9b1f0b99445e608c21ba0782c719ad8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.10",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.96"
version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
dependencies = [
"itoa",
"ryu",
@@ -372,17 +375,6 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"

changelog.d/14263.misc (new file)

@@ -0,0 +1 @@
Improve performance of the `/hierarchy` endpoint.

changelog.d/14545.misc (new file)

@@ -0,0 +1 @@
Faster remote room joins: stream the un-partial-stating of events over replication.

changelog.d/14546.misc (new file)

@@ -0,0 +1 @@
Faster remote room joins: stream the un-partial-stating of events over replication.

changelog.d/14644.bugfix (new file)

@@ -0,0 +1 @@
Fix the *MAU Limits* section of the Grafana dashboard relying on a specific `job` name for the workers of a Synapse deployment.

changelog.d/14665.misc (new file)

@@ -0,0 +1 @@
Change `handle_new_client_event` signature so that a 429 does not reach clients on `PartialStateConflictError`, and internally retry when needed instead.

changelog.d/14669.bugfix (new file)

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 1.70.0 which could cause spurious `UNIQUE constraint failed` errors in the `rotate_notifs` background job.

changelog.d/14672.misc (new file)

@@ -0,0 +1 @@
Remove dependency on jQuery on reCAPTCHA page.

changelog.d/14673.doc (new file)

@@ -0,0 +1 @@
Declare support for Python 3.11.

changelog.d/14674.doc (new file)

@@ -0,0 +1 @@
Fix `target_memory_usage` being used in the description for the actual `cache_autotune` sub-option `target_cache_memory_usage`.

changelog.d/14676.misc (new file)

@@ -0,0 +1 @@
Faster joins: make `computer_state_after_events` consistent with other state-fetching functions that take a `StateFilter`.

changelog.d/14680.misc (new file)

@@ -0,0 +1 @@
Add missing type hints.

changelog.d/14681.misc (new file)

@@ -0,0 +1 @@
Add missing type hints.

changelog.d/14685.misc (new file)

@@ -0,0 +1 @@
Improve type annotations for the helper methods on a `CachedFunction`.

changelog.d/14693.misc (new file)

@@ -0,0 +1 @@
Bump JasonEtco/create-an-issue from 2.8.1 to 2.8.2.

changelog.d/14695.misc (new file)

@@ -0,0 +1 @@
Bump blake2 from 0.10.5 to 0.10.6.

changelog.d/14697.misc (new file)

@@ -0,0 +1 @@
Bump serde from 1.0.150 to 1.0.151.

changelog.d/14699.misc (new file)

@@ -0,0 +1 @@
Bump hiredis from 2.0.0 to 2.1.0.

changelog.d/14700.misc (new file)

@@ -0,0 +1 @@
Bump types-jsonschema from 4.17.0.1 to 4.17.0.2.

changelog.d/14701.misc (new file)

@@ -0,0 +1 @@
Bump sentry-sdk from 1.11.1 to 1.12.0.

changelog.d/14702.misc (new file)

@@ -0,0 +1 @@
Bump types-setuptools from 65.6.0.1 to 65.6.0.2.

changelog.d/14707.misc (new file)

@@ -0,0 +1 @@
Add `.direnv/` directory to .gitignore to prevent local state generated by the [direnv](https://direnv.net/) development tool from being committed.


@@ -1,28 +0,0 @@
# Schema symlinks
This directory contains symlinks to the latest dump of the postgres full schema. This is useful to have, as it allows IDEs to understand our schema and provide autocomplete, linters, inspections, etc.
In particular, the DataGrip functionality in IntelliJ's products seems to only consider files called `*.sql` when defining a schema from DDL; `*.sql.postgres` will be ignored. To get around this we symlink those files to ones ending in `.sql`. We've chosen to ignore the `.sql.sqlite` schema dumps here, as they're not intended for production use (and are much quicker to test against).
## Example
![](datagrip-aware-of-schema.png)
## Caveats
- Doesn't include temporary tables created ad-hoc by Synapse.
- Postgres only. IDEs will likely be confused by SQLite-specific queries.
- Will not include migrations created after the latest schema dump.
- Symlinks might confuse checkouts on Windows systems.
## Instructions
### Jetbrains IDEs with DataGrip plugin
- View -> Tool Windows -> Database
- `+` Icon -> DDL Data Source
- Pick a name, e.g. `Synapse schema dump`
- Under sources, click `+`.
- Add an entry with Path pointing to this directory, and dialect set to PostgreSQL.
- OK, and OK.
- IDE should now be aware of the schema.
- Try control-clicking on a table name in a bit of SQL e.g. in `_get_forgotten_rooms_for_user_txn`.
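For reference, the symlinks this README describes are ordinary relative links; a sketch of recreating one, using a target path from the deleted entries below (the link name `full.sql` is assumed from the README's naming convention):

```python
# Recreate one schema-dump symlink as the README describes. The target is
# taken from the deleted symlink entries below; the link name is assumed.
import os

target = "../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres"
if not os.path.islink("full.sql"):
    os.symlink(target, "full.sql")
```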


@@ -1 +0,0 @@
../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres

Binary file not shown (13 KiB image removed).


@@ -1 +0,0 @@
../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres


@@ -1 +0,0 @@
../../synapse/storage/schema/common/schema_version.sql


@@ -1 +0,0 @@
../../synapse/storage/schema/state/full_schemas/72/full.sql.postgres


@@ -68,12 +68,7 @@ redis:
enabled: true
host: redis
port: 6379
# dbid: <redis_logical_db_id>
# password: <secret_password>
# use_tls: True
# certificate_file: <path_to_certificate>
# private_key_file: <path_to_private_key>
# ca_file: <path_to_ca_certificate>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.


@@ -1,47 +0,0 @@
# `lnav` config for Synapse logs
[lnav](https://lnav.org/) is a log-viewing tool. It is particularly useful when
you need to interleave multiple log files, or for exploring a large log file
with regex filters. The downside is that it is not as ubiquitous as tools like
`less`, `grep`, etc.
This directory contains an `lnav` [log format definition](
https://docs.lnav.org/en/v0.10.1/formats.html#defining-a-new-format
) for Synapse logs as
emitted by Synapse with the default [logging configuration](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#log_config
). It supports lnav 0.10.1 because that's what's packaged by my distribution.
This should allow lnav:
- to interpret timestamps, allowing log interleaving;
- to interpret log severity levels, allowing colouring by log level(!!!);
- to interpret request IDs, allowing you to skip through a specific request; and
- to highlight room, event and user IDs in logs.
See also https://gist.github.com/benje/e2ab750b0a81d11920d83af637d289f7 for a
similar example.
## Example
[![asciicast](https://asciinema.org/a/556133.svg)](https://asciinema.org/a/556133)
## Tips
- `lnav -i /path/to/synapse/checkout/contrib/lnav/synapse-log-format.json`
- `lnav my_synapse_log_file` or `lnav synapse_log_files.*`, etc.
- `lnav --help` for CLI help.
Within lnav itself:
- `?` for help within lnav itself.
- `q` to quit.
- `/` to search a-la `less` and `vim`, then `n` and `N` to continue searching
down and up.
- Use `o` and `O` to skip through logs based on the request ID (`POST-1234`, or
else the value of the [`request_id_header`](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=request_id_header#listeners
) header). This may get confused if the same request ID is repeated among
multiple files or process restarts.
- ???
- Profit


@@ -1,67 +0,0 @@
{
    "$schema": "https://lnav.org/schemas/format-v1.schema.json",
    "synapse": {
        "title": "Synapse logs",
        "description": "Logs output by Synapse, a Matrix homesever, under its default logging config.",
        "regex": {
            "log": {
                "pattern": ".*(?<timestamp>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}) - (?<logger>.+) - (?<lineno>\\d+) - (?<level>\\w+) - (?<context>.+) - (?<body>.*)"
            }
        },
        "json": false,
        "timestamp-field": "timestamp",
        "timestamp-format": [
            "%Y-%m-%d %H:%M:%S,%L"
        ],
        "level-field": "level",
        "body-field": "body",
        "opid-field": "context",
        "level": {
            "critical": "CRITICAL",
            "error": "ERROR",
            "warning": "WARNING",
            "info": "INFO",
            "debug": "DEBUG"
        },
        "sample": [
            {
                "line": "my-matrix-server-generic-worker-4 | 2023-01-27 09:47:09,818 - synapse.replication.tcp.client - 381 - ERROR - PUT-32992 - Timed out waiting for stream receipts",
                "level": "error"
            },
            {
                "line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
                "level": "warning"
            },
            {
                "line": "my-matrix-server | 2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main - [database config 'master']: Checking database server",
                "level": "info"
            },
            {
                "line": "my-matrix-server | 2023-01-26 15:08:40,447 - synapse.access.http.8008 - 460 - INFO - PUT-74929 - 0.0.0.0 - 8008 - {@alice:example.com} Processed request: 0.011sec/0.000sec (0.000sec, 0.000sec) (0.001sec/0.008sec/3) 2B 200 \"PUT /_matrix/client/r0/user/%40alice%3Atexample.com/account_data/im.vector.setting.breadcrumbs HTTP/1.0\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Element/1.11.20 Chrome/108.0.5359.179 Electron/22.0.3 Safari/537.36\" [0 dbevts]",
                "level": "info"
            }
        ],
        "highlights": {
            "user_id": {
                "pattern": "(@|%40)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
                "underline": true
            },
            "room_id": {
                "pattern": "(!|%21)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
                "underline": true
            },
            "room_alias": {
                "pattern": "(#|%23)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
                "underline": true
            },
            "event_id_v1_v2": {
                "pattern": "(\\$|%25)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
                "underline": true
            },
            "event_id_v3_plus": {
                "pattern": "(\\$|%25)([A-Za-z0-9+/_]|-){43}",
                "underline": true
            }
        }
    }
}
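As a sanity check, the format's `log` regex really does parse its own sample lines. Here it is exercised from Python; note that Python spells named groups `(?P<name>...)` where the JSON above uses the PCRE-style `(?<name>...)`:

```python
# Exercise the lnav format's regex (translated to Python's named-group
# syntax) against one of the sample lines from the format file above.
import re

pattern = re.compile(
    r".*(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) - "
    r"(?P<logger>.+) - (?P<lineno>\d+) - (?P<level>\w+) - "
    r"(?P<context>.+) - (?P<body>.*)"
)
line = (
    "my-matrix-server | 2023-01-25 20:55:54,433 - synapse.storage.databases"
    " - 66 - INFO - main - [database config 'master']: Checking database server"
)
match = pattern.match(line)
assert match is not None and match["level"] == "INFO" and match["context"] == "main"
```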


@@ -15,19 +15,19 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
  - type: http
    port: 808$i
    x_forwarded: true
    resources:
      - names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
#worker_pid_file: DATADIR/generic_worker$i.pid
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
Customise the script to your needs. Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
Customise the script to your needs.
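The documented shell loop just stamps out one YAML file per worker with a unique name and port; a rough Python equivalent (the `worker_app` line and the output location are assumptions, since the excerpt above starts at `worker_name`):

```python
# Rough equivalent of the documented loop: five generic workers named
# generic_worker1..5, listening on ports 8081-8085. worker_app and the
# output directory are assumed, not shown in the excerpt above.
for i in range(1, 6):
    config = f"""\
worker_app: synapse.app.generic_worker
worker_name: generic_worker{i}
worker_listeners:
  - type: http
    port: {8080 + i}
    x_forwarded: true
    resources:
      - names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
"""
    with open(f"generic_worker{i}.yaml", "w") as f:
        f.write(config)
```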


@@ -8,9 +8,7 @@ It also prints out the example lines for Synapse main configuration file.
Remember to route necessary endpoints directly to a worker associated with it.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
Hint: Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
```sh
#!/bin/bash
@@ -48,11 +46,9 @@ worker_listeners:
  - type: http
    port: $(expr $HTTP_START_PORT + $i)
    x_forwarded: true
    resources:
      - names: [client]
#worker_pid_file: DATADIR/${STREAM_WRITERS[$i]}.pid
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
@@ -95,9 +91,7 @@ Simply run the script to create YAML files in the current folder and print out t
```console
$ ./create_stream_writers.sh
```
You should receive an output similar to the following:
```console
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.


@@ -31,11 +31,12 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.3.2
pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \

debian/changelog (200 lines changed)

@@ -1,203 +1,3 @@
matrix-synapse-py3 (1.85.2) stable; urgency=medium
* New Synapse release 1.85.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 08 Jun 2023 13:04:18 +0100
matrix-synapse-py3 (1.85.1) stable; urgency=medium
* New Synapse release 1.85.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 07 Jun 2023 10:51:12 +0100
matrix-synapse-py3 (1.85.0) stable; urgency=medium
* New Synapse release 1.85.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Jun 2023 09:39:29 +0100
matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium
* New Synapse release 1.85.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 01 Jun 2023 09:16:18 -0700
matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium
* New Synapse release 1.85.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 30 May 2023 13:56:54 +0100
matrix-synapse-py3 (1.84.1) stable; urgency=medium
* New Synapse release 1.84.1.
-- Synapse Packaging team <packages@matrix.org> Fri, 26 May 2023 16:15:30 +0100
matrix-synapse-py3 (1.84.0) stable; urgency=medium
* New Synapse release 1.84.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 23 May 2023 10:57:22 +0100
matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium
* New Synapse release 1.84.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 May 2023 11:12:02 +0100
matrix-synapse-py3 (1.83.0) stable; urgency=medium
* New Synapse release 1.83.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 09 May 2023 18:13:37 +0200
matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium
* New Synapse release 1.83.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 May 2023 15:56:38 +0100
matrix-synapse-py3 (1.82.0) stable; urgency=medium
* New Synapse release 1.82.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Apr 2023 11:56:06 +0100
matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
* New Synapse release 1.82.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Apr 2023 09:47:30 +0100
matrix-synapse-py3 (1.81.0) stable; urgency=medium
* New Synapse release 1.81.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Apr 2023 14:18:35 +0100
matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium
* New Synapse release 1.81.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 06 Apr 2023 16:07:54 +0100
matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium
* New Synapse release 1.81.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Apr 2023 14:29:03 +0100
matrix-synapse-py3 (1.80.0) stable; urgency=medium
* New Synapse release 1.80.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Mar 2023 11:10:33 +0100
matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium
* New Synapse release 1.80.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 22 Mar 2023 08:30:16 -0700
matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium
* New Synapse release 1.80.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Mar 2023 10:56:08 -0700
matrix-synapse-py3 (1.79.0) stable; urgency=medium
* New Synapse release 1.79.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Mar 2023 16:14:50 +0100
matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
* New Synapse release 1.79.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 13 Mar 2023 12:54:21 +0000
matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
* New Synapse release 1.79.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Mar 2023 12:03:49 +0000
matrix-synapse-py3 (1.78.0) stable; urgency=medium
* New Synapse release 1.78.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Feb 2023 08:56:03 -0800
matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium
* Add `matrix-org-archive-keyring` package as recommended.
* New Synapse release 1.78.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Feb 2023 14:29:19 +0000
matrix-synapse-py3 (1.77.0) stable; urgency=medium
* New Synapse release 1.77.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Feb 2023 12:59:02 +0100
matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium
* New Synapse release 1.77.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 10 Feb 2023 12:44:21 +0000
matrix-synapse-py3 (1.77.0~rc1) stable; urgency=medium
* New Synapse release 1.77.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Feb 2023 13:45:14 +0000
matrix-synapse-py3 (1.76.0) stable; urgency=medium
* New Synapse release 1.76.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 Jan 2023 08:21:47 -0800
matrix-synapse-py3 (1.76.0~rc2) stable; urgency=medium
* New Synapse release 1.76.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 27 Jan 2023 11:17:57 +0000
matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium
* Use Poetry 1.3.2 to manage the bundled virtualenv included with this package.
* New Synapse release 1.76.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 25 Jan 2023 16:21:16 +0000
matrix-synapse-py3 (1.75.0) stable; urgency=medium
* New Synapse release 1.75.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 Jan 2023 11:36:02 +0000
matrix-synapse-py3 (1.75.0~rc2) stable; urgency=medium
* New Synapse release 1.75.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 12 Jan 2023 10:30:15 -0800
matrix-synapse-py3 (1.75.0~rc1) stable; urgency=medium
* New Synapse release 1.75.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 Jan 2023 12:18:27 +0000
matrix-synapse-py3 (1.74.0) stable; urgency=medium
* New Synapse release 1.74.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Dec 2022 16:07:38 +0000
matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium
* New dependency on libicu-dev to provide improved results for user

debian/control (1 line changed)

@@ -37,7 +37,6 @@ Depends:
# so we put perl:Depends in Suggests rather than Depends.
Recommends:
${shlibs1:Recommends},
matrix-org-archive-keyring,
Suggests:
sqlite3,
${perl:Depends},


@@ -46,7 +46,7 @@ for port in 8080 8081 8082; do
echo ''
# Warning, this heredoc depends on the interaction of tabs and spaces.
# Please don't accidentally bork me with your fancy settings.
# Please don't accidentaly bork me with your fancy settings.
listeners=$(cat <<-PORTLISTENERS
# Configure server to listen on both $https_port and $port
# This overides some of the default settings above
@@ -80,8 +80,12 @@ for port in 8080 8081 8082; do
echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
# Request keys directly from servers contacted over federation
echo 'trusted_key_servers: []'
# Ignore keys from the trusted keys server
echo '# Ignore keys from the trusted keys server'
echo 'trusted_key_servers:'
echo ' - server_name: "matrix.org"'
echo ' accept_keys_insecurely: true'
echo ''
# Allow the servers to communicate over localhost.
allow_list=$(cat <<-ALLOW_LIST


@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)


@@ -1,50 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "Synapse development"
copyright = "2023, The Matrix.org Foundation C.I.C."
author = "The Synapse Maintainers and Community"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"autodoc2",
"myst_parser",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for Autodoc2 ----------------------------------------------------
autodoc2_docstring_parser_regexes = [
# this will render all docstrings as 'MyST' Markdown
(r".*", "myst"),
]
autodoc2_packages = [
{
"path": "../synapse",
# Don't render documentation for everything as a matter of course
"auto_mode": False,
},
]
# -- Options for MyST (Markdown) ---------------------------------------------
# myst_heading_anchors = 2
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "furo"
html_static_path = ["_static"]


@@ -1,22 +0,0 @@
.. Synapse Developer Documentation documentation master file, created by
sphinx-quickstart on Mon Mar 13 08:59:51 2023.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to the Synapse Developer Documentation!
===========================================================
.. toctree::
:maxdepth: 2
:caption: Contents:
modules/federation_sender
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@@ -1,5 +0,0 @@
Federation Sender
=================
```{autodoc2-docstring} synapse.federation.sender
```


@@ -17,10 +17,16 @@
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.
# `poetry export | pip install -r /dev/stdin`, but there are known bugs in
# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
ARG PYTHON_VERSION=3.11
ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
@@ -34,31 +40,16 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential curl git libffi-dev libssl-dev pkg-config \
&& rm -rf /var/lib/apt/lists/*
# Install rust and ensure its in the PATH.
# (Rust may be needed to compile `cryptography`---which is one of poetry's
# dependencies---on platforms that don't have a `cryptography` wheel.
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry==1.3.2"
pip install --user "poetry==1.2.0"
WORKDIR /synapse
@@ -79,9 +70,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
else \
touch /synapse/requirements.txt; \
touch /synapse/requirements.txt; \
fi
###
@@ -91,24 +82,24 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
zlib1g-dev \
git \
curl \
libicu-dev \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
zlib1g-dev \
git \
curl \
libicu-dev \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
# Install rust and ensure its in the PATH
@@ -149,9 +140,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
--mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
fi
###
@@ -166,20 +157,19 @@ LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git
LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libicu67 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
@@ -190,4 +180,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
CMD curl -fSs http://localhost:8008/health || exit 1


@@ -36,10 +36,8 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
wget
# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
# Debian sid. TODO: Switch back to upstream once https://github.com/spotify/dh-virtualenv/pull/354 has merged.
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/matrix-org/dh-virtualenv/archive/refs/tags/matrixorg-2023010302.tar.gz
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
# install its build deps. We do another apt-cache-update here, because we might


@@ -6,7 +6,7 @@ set -e
echo "Complement Synapse launcher"
echo " Args: $@"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
@@ -51,7 +51,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# -z True if the length of string is zero.
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
export SYNAPSE_WORKER_TYPES="\
event_persister:2, \
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
@@ -63,8 +64,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
synchrotron, \
client_reader, \
appservice, \
pusher, \
stream_writers=account_data+presence+receipts+to_device+typing"
pusher"
fi
log "Workers requested: $SYNAPSE_WORKER_TYPES"
@@ -76,17 +76,6 @@ else
fi
if [[ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]]; then
if [[ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]]; then
export SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR="1"
else
export SYNAPSE_ASYNC_IO_REACTOR="1"
fi
else
export SYNAPSE_ASYNC_IO_REACTOR="0"
fi
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then


@@ -94,16 +94,16 @@ allow_device_name_lookup_over_federation: true
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
{% if not workers_in_use %}
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls
msc3381_polls_enabled: true
# Enable deleting device-specific notification settings stored in account data
msc3890_enabled: true
# Enable removing account data support
msc3391_enabled: true
{% endif %}
# Filtering /messages by relation type.
msc3874_enabled: true
# Enable removing account data support
msc3391_enabled: true
server_notices:
system_mxid_localpart: _server


@@ -6,6 +6,10 @@
worker_app: "{{ app }}"
worker_name: "{{ name }}"
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: {{ port }}
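The Jinja2 worker template above gains a hard-coded replication host and port in this compare. As a quick illustration of how the generator fills it in, here is a hedged sketch that renders the template with hypothetical values (`client_reader1` and port `18009` are invented; the template text mirrors the lines shown above):

```python
from jinja2 import Template

worker_template = Template(
    """\
worker_app: "{{ app }}"
worker_name: "{{ name }}"

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: {{ port }}
"""
)

# Hypothetical values; the real script passes these through its convert() helper.
print(worker_template.render(app="synapse.app.generic_worker", name="client_reader1", port=18009))
```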


@@ -19,15 +19,8 @@
# The environment variables it reads are:
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
# below. Leave empty for no workers. Add a ':' and a number at the end to
# multiply that worker. Append multiple worker types with '+' to merge the
# worker types into a single worker. Add a name and a '=' to the front of a
# worker type to give this instance a name in logs and nginx.
# Examples:
# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
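The richer comment block above defines the full `SYNAPSE_WORKER_TYPES` grammar: a trailing `:N` multiplies a worker, `+` merges several types into one worker, and a leading `name=` assigns an explicit base name. As a rough, hypothetical sketch of how such a string decomposes into named workers (not the script's actual implementation, which appears later in this diff):

```python
from collections import defaultdict
from typing import Dict, Set

def decompose(worker_types_env: str) -> Dict[str, Set[str]]:
    """Toy parser for strings like 'event_persister:2, writers=account_data+typing'."""
    counters: Dict[str, int] = defaultdict(int)
    workers: Dict[str, Set[str]] = {}
    for chunk in (c.strip() for c in worker_types_env.split(",") if c.strip()):
        count = 1
        if ":" in chunk:  # trailing ':N' multiplies this worker
            chunk, _, n = chunk.rpartition(":")
            count = int(n)  # raises ValueError on a bad count; the real script calls error()
        name = ""
        if "=" in chunk:  # leading 'name=' gives an explicit base name
            name, _, chunk = chunk.partition("=")
        types = {t.strip() for t in chunk.split("+")}  # '+' merges worker types
        base = name or "+".join(sorted(types))
        for _ in range(count):
            counters[base] += 1
            workers[f"{base}{counters[base]}"] = types
    return workers

print(decompose("event_persister:2, writers=account_data+typing"))
# -> worker names event_persister1, event_persister2 and writers1
```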
@@ -47,35 +40,15 @@
import os
import platform
import re
import subprocess
import sys
from collections import defaultdict
from itertools import chain
from pathlib import Path
from typing import (
Any,
Dict,
List,
Mapping,
MutableMapping,
NoReturn,
Optional,
Set,
SupportsIndex,
)
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
import yaml
from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
WORKER_PLACEHOLDER_NAME = "placeholder_name"
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
@@ -97,13 +70,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
],
"shared_extra_conf": {
"update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
},
"shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
"worker_extra_conf": "",
},
"media_repository": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.media_repository",
"listener_resources": ["media"],
"endpoint_patterns": [
"^/_matrix/media/",
@@ -116,7 +87,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
# The first configured media worker will run the media background jobs
"shared_extra_conf": {
"enable_media_repo": False,
"media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
"media_instance_running_background_jobs": "media_repository1",
},
"worker_extra_conf": "enable_media_repo: true",
},
@@ -124,9 +95,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {
"notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
},
"shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -166,7 +135,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/versions$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
"^/_matrix/client/(r0|v3|unstable)/register$",
"^/_matrix/client/(r0|v3|unstable)/register/available$",
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
@@ -174,10 +142,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
"^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
"^/_matrix/client/(r0|v3|unstable)/password_policy$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
"^/_matrix/client/(r0|v3|unstable)/capabilities$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -227,9 +191,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
# This worker cannot be sharded. Therefore, there should only ever be one
# background worker. This is enforced for the safety of your database.
"shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
# This worker cannot be sharded. Therefore there should only ever be one background
# worker, and it should be named background_worker1
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
"worker_extra_conf": "",
},
"event_creator": {
@@ -240,7 +204,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
@@ -310,7 +273,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
"""
NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
upstream {upstream_worker_type} {{
{body}
}}
"""
@@ -361,7 +324,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
def add_worker_roles_to_shared_config(
shared_config: dict,
worker_types_set: Set[str],
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
@@ -369,36 +332,22 @@ def add_worker_roles_to_shared_config(
append appropriate worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being
converted to YAML)
worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
This list can be a single worker type or multiple.
shared_config: The config dict that all worker instances share (after being converted to YAML)
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
worker_name: The name of the worker instance.
worker_port: The HTTP replication port that the worker instance is listening on.
"""
# The instance_map config field marks the workers that write to various replication
# streams
# The instance_map config field marks the workers that write to various replication streams
instance_map = shared_config.setdefault("instance_map", {})
# This is a list of the stream_writers that there can be only one of. Events can be
# sharded, and therefore doesn't belong here.
singular_stream_writers = [
"account_data",
"presence",
"receipts",
"to_device",
"typing",
]
# Worker-type specific sharding config. Now a single worker can fulfill multiple
# roles, check each.
if "pusher" in worker_types_set:
# Worker-type specific sharding config
if worker_type == "pusher":
shared_config.setdefault("pusher_instances", []).append(worker_name)
if "federation_sender" in worker_types_set:
elif worker_type == "federation_sender":
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
if "event_persister" in worker_types_set:
elif worker_type == "event_persister":
# Event persisters write to the events stream, so we need to update
# the list of event stream writers
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
@@ -411,154 +360,19 @@ def add_worker_roles_to_shared_config(
"port": worker_port,
}
# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.
for worker in worker_types_set:
if worker in singular_stream_writers:
shared_config.setdefault("stream_writers", {}).setdefault(
worker, []
).append(worker_name)
elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
# Update the list of stream writers
# It's convenient that the name of the worker type is the same as the stream to write
shared_config.setdefault("stream_writers", {}).setdefault(
worker_type, []
).append(worker_name)
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
def merge_worker_template_configs(
existing_dict: Optional[Dict[str, Any]],
to_be_merged_dict: Dict[str, Any],
) -> Dict[str, Any]:
"""When given an existing dict of worker template configuration consisting with both
dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
return new dict.
Args:
existing_dict: Either an existing worker template or a fresh blank one.
to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
existing_dict.
Returns: The newly merged together dict values.
"""
new_dict: Dict[str, Any] = {}
if not existing_dict:
# It doesn't exist yet, just use the new dict(but take a copy not a reference)
new_dict = to_be_merged_dict.copy()
else:
for i in to_be_merged_dict.keys():
if (i == "endpoint_patterns") or (i == "listener_resources"):
# merge the two lists, remove duplicates
new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
elif i == "shared_extra_conf":
# merge dictionary's, the worker name will be replaced later
new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
elif i == "worker_extra_conf":
# There is only one worker type that has a 'worker_extra_conf' and it is
# the media_repo. Since duplicate worker types on the same worker don't
# work, this is fine.
new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
else:
# Everything else should be identical, like "app", which only works
# because all apps are now generic_workers.
new_dict[i] = to_be_merged_dict[i]
return new_dict
def insert_worker_name_for_worker_config(
existing_dict: Dict[str, Any], worker_name: str
) -> Dict[str, Any]:
"""Insert a given worker name into the worker's configuration dict.
Args:
existing_dict: The worker_config dict that is imported into shared_config.
worker_name: The name of the worker to insert.
Returns: Copy of the dict with newly inserted worker name
"""
dict_to_edit = existing_dict.copy()
for k, v in dict_to_edit["shared_extra_conf"].items():
# Only proceed if it's the placeholder name string
if v == WORKER_PLACEHOLDER_NAME:
dict_to_edit["shared_extra_conf"][k] = worker_name
return dict_to_edit
def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
"""
Apply multiplier(if found) by returning a new expanded list with some basic error
checking.
Args:
worker_types: The unprocessed List of requested workers
Returns:
A new list with all requested workers expanded.
"""
# Checking performed:
# 1. if worker:2 or more is declared, it will create additional workers up to number
# 2. if worker:1, it will create a single copy of this worker as if no number was
# given
# 3. if worker:0 is declared, this worker will be ignored. This is to allow for
# scripting and automated expansion and is intended behaviour.
# 4. if worker:NaN or is a negative number, it will error and log it.
new_worker_types = []
for worker_type in worker_types:
if ":" in worker_type:
worker_type_components = split_and_strip_string(worker_type, ":", 1)
worker_count = 0
# Should only be 2 components, a type of worker(s) and an integer as a
# string. Cast the number as an int then it can be used as a counter.
try:
worker_count = int(worker_type_components[1])
except ValueError:
error(
f"Bad number in worker count for '{worker_type}': "
f"'{worker_type_components[1]}' is not an integer"
)
# As long as there are more than 0, we add one to the list to make below.
for _ in range(worker_count):
new_worker_types.append(worker_type_components[0])
else:
# If it's not a real worker_type, it will error out later.
new_worker_types.append(worker_type)
return new_worker_types
def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
"""Helper to check to make sure worker types that cannot have multiples do not.
Args:
worker_type: The type of worker to check against.
Returns: True if allowed, False if not
"""
return worker_type not in [
"background_worker",
"account_data",
"presence",
"receipts",
"typing",
"to_device",
]
def split_and_strip_string(
given_string: str, split_char: str, max_split: SupportsIndex = -1
) -> List[str]:
"""
Helper to split a string on split_char and strip whitespace from each end of each
element.
Args:
given_string: The string to split
split_char: The character to split the string on
max_split: kwarg for split() to limit how many times the split() happens
Returns:
A List of strings
"""
# Removes whitespace from ends of result strings before adding to list. Allow for
# overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
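The `split_and_strip_string` helper that ends here is a thin wrapper around `str.split` that also trims whitespace from each element; a quick illustration of its behaviour:

```python
def split_and_strip_string(given_string: str, split_char: str, max_split: int = -1) -> list:
    # Same behaviour as the helper above (max_split typed as a plain int here for brevity).
    return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]

print(split_and_strip_string(" a , b , c ", ","))     # ['a', 'b', 'c']
print(split_and_strip_string("name = a+b ", "=", 1))  # ['name', 'a+b']
```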
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
def generate_base_homeserver_config() -> None:
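A condensed, hypothetical sketch of the shared-config bookkeeping performed by the rewritten `add_worker_roles_to_shared_config` shown a few hunks above (worker names and ports invented; this is not the script's verbatim code):

```python
from typing import Any, Dict, Set

# Stream writers there can be only one of; events is shardable and handled separately.
SINGULAR_STREAM_WRITERS = {"account_data", "presence", "receipts", "to_device", "typing"}

def add_roles(shared: Dict[str, Any], types: Set[str], name: str, port: int) -> None:
    instance_map = shared.setdefault("instance_map", {})
    if "event_persister" in types:
        # Event persisters write to the (shardable) events stream.
        shared.setdefault("stream_writers", {}).setdefault("events", []).append(name)
        instance_map[name] = {"host": "localhost", "port": port}
    for t in types & SINGULAR_STREAM_WRITERS:
        shared.setdefault("stream_writers", {}).setdefault(t, []).append(name)
        instance_map[name] = {"host": "localhost", "port": port}

shared_config: Dict[str, Any] = {}
add_roles(shared_config, {"event_persister"}, "event_persister1", 18009)
add_roles(shared_config, {"account_data", "typing"}, "stream_writers1", 18010)
print(shared_config["stream_writers"])
# events, account_data and typing each map to the owning worker's name
```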
@@ -573,157 +387,33 @@ def generate_base_homeserver_config() -> None:
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def parse_worker_types(
requested_worker_types: List[str],
) -> Dict[str, Set[str]]:
"""Read the desired list of requested workers and prepare the data for use in
generating worker config files while also checking for potential gotchas.
Args:
requested_worker_types: The list formed from the split environment variable
containing the unprocessed requests for workers.
Returns: A dict of worker names to set of worker types. Format:
{'worker_name':
{'worker_type', 'worker_type2'}
}
"""
# A counter of worker_base_name -> int. Used for determining the name for a given
# worker when generating its config file, as each worker's name is just
# worker_base_name followed by instance number
worker_base_name_counter: Dict[str, int] = defaultdict(int)
# Similar to above, but more finely grained. This is used to determine we don't have
# more than a single worker for cases where multiples would be bad(e.g. presence).
worker_type_shard_counter: Dict[str, int] = defaultdict(int)
# The final result of all this processing
dict_to_return: Dict[str, Set[str]] = {}
# Handle any multipliers requested for given workers.
multiple_processed_worker_types = apply_requested_multiplier_for_worker(
requested_worker_types
)
# Process each worker_type_string
# Examples of expected formats:
# - requested_name=type1+type2+type3
# - synchrotron
# - event_creator+event_persister
for worker_type_string in multiple_processed_worker_types:
# First, if a name is requested, use that — otherwise generate one.
worker_base_name: str = ""
if "=" in worker_type_string:
# Split on "=", remove extra whitespace from ends then make list
worker_type_split = split_and_strip_string(worker_type_string, "=")
if len(worker_type_split) > 2:
error(
"There should only be one '=' in the worker type string. "
f"Please fix: {worker_type_string}"
)
# Assign the name
worker_base_name = worker_type_split[0]
if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
# Apply a fairly narrow regex to the worker names. Some characters
# aren't safe for use in file paths or nginx configurations.
# Don't allow to end with a number because we'll add a number
# ourselves in a moment.
error(
"Invalid worker name; please choose a name consisting of "
"alphanumeric letters, _ + -, but not ending with a digit: "
f"{worker_base_name!r}"
)
# Continue processing the remainder of the worker_type string
# with the name override removed.
worker_type_string = worker_type_split[1]
# Split the worker_type_string on "+", remove whitespace from ends then make
# the list a set so it's deduplicated.
worker_types_set: Set[str] = set(
split_and_strip_string(worker_type_string, "+")
)
if not worker_base_name:
# No base name specified: generate one deterministically from set of
# types
worker_base_name = "+".join(sorted(worker_types_set))
# At this point, we have:
# worker_base_name which is the name for the worker, without counter.
# worker_types_set which is the set of worker types for this worker.
# Validate worker_type and make sure we don't allow sharding for a worker type
# that doesn't support it. Will error and stop if it is a problem,
# e.g. 'background_worker'.
for worker_type in worker_types_set:
# Verify this is a real defined worker type. If it's not, stop everything so
# it can be fixed.
if worker_type not in WORKERS_CONFIG:
error(
f"{worker_type} is an unknown worker type! Was found in "
f"'{worker_type_string}'. Please fix!"
)
if worker_type in worker_type_shard_counter:
if not is_sharding_allowed_for_worker_type(worker_type):
error(
f"There can be only a single worker with {worker_type} "
"type. Please recount and remove."
)
# Not in shard counter, must not have seen it yet, add it.
worker_type_shard_counter[worker_type] += 1
# Generate the number for the worker using incrementing counter
worker_base_name_counter[worker_base_name] += 1
worker_number = worker_base_name_counter[worker_base_name]
worker_name = f"{worker_base_name}{worker_number}"
if worker_number > 1:
# If this isn't the first worker, check that we don't have a confusing
# mixture of worker types with the same base name.
first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
if first_worker_with_base_name != worker_types_set:
error(
f"Can not use worker_name: '{worker_name}' for worker_type(s): "
f"{worker_types_set!r}. It is already in use by "
f"worker_type(s): {first_worker_with_base_name!r}"
)
dict_to_return[worker_name] = worker_types_set
return dict_to_return
def generate_worker_files(
environ: Mapping[str, str],
config_path: str,
data_dir: str,
requested_worker_types: Dict[str, Set[str]],
environ: Mapping[str, str], config_path: str, data_dir: str
) -> None:
"""Read the desired workers(if any) that is passed in and generate shared
homeserver, nginx and supervisord configs.
"""Read the desired list of workers from environment variables and generate
shared homeserver, nginx and supervisord configs.
Args:
environ: os.environ instance.
config_path: The location of the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
requested_worker_types: A Dict containing requested workers in the format of
{'worker_name1': {'worker_type', ...}}
"""
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
# shared_config is the contents of a Synapse config file that will be shared amongst
# the main Synapse process as well as all workers.
# It is intended mainly for disabling functionality when certain workers are spun up,
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result to the shared
# config file.
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": 9093,
"bind_address": "127.0.0.1",
"type": "http",
"resources": [{"names": ["replication"]}],
}
@@ -735,9 +425,9 @@ def generate_worker_files(
listeners += original_listeners
# The shared homeserver config. The contents of which will be inserted into the
# base shared worker jinja2 template. This config file will be passed to all
# workers, included Synapse's main process. It is intended mainly for disabling
# functionality when certain workers are spun up, and adding a replication listener.
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
# List of dicts that describe workers.
@@ -745,20 +435,31 @@ def generate_worker_files(
# program blocks.
worker_descriptors: List[Dict[str, Any]] = []
# Upstreams for load-balancing purposes. This dict takes the form of the worker
# type to the ports of each worker. For example:
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
# {
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
# will be placed after the proxy_pass directive. The main benefit to representing
# this data as a dict over a str is that we can easily deduplicate endpoints
# across multiple instances of the same worker. The final rendering will be combined
# with nginx_upstreams and placed in /etc/nginx/conf.d.
nginx_locations: Dict[str, str] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
# dict over a str is that we can easily deduplicate endpoints across multiple instances
# of the same worker.
#
# An nginx site config that will be amended to depending on the workers that are
# spun up. To be placed in /etc/nginx/conf.d.
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
if not worker_types_env:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma, ignoring whitespace.
worker_types = [x.strip() for x in worker_types_env.split(",")]
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -766,57 +467,66 @@ def generate_worker_files(
# Start worker ports from this arbitrary port
worker_port = 18009
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {}
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
# Get the set of all worker types that we have configured
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
# Map locations to upstreams (corresponding to worker types) in Nginx
# but only if we use the appropriate worker type
for worker_type in all_worker_types_in_use:
for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
nginx_locations[endpoint_pattern] = f"http://{worker_type}"
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
error(worker_type + " is an unknown worker type! Please fix!")
# For each worker type specified by the user, create config values and write it's
# yaml config file
for worker_name, worker_types_set in requested_worker_types.items():
# The collected and processed data will live here.
worker_config: Dict[str, Any] = {}
# Merge all worker config templates for this worker into a single config
for worker_type in worker_types_set:
copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
# Merge worker type template configuration data. It's a combination of lists
# and dicts, so use this helper.
worker_config = merge_worker_template_configs(
worker_config, copy_of_template_config
)
# Replace placeholder names in the config template with the actual worker name.
worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
# Name workers by their type concatenated with an incrementing number
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
# Update the shared config with any worker_type specific options. The first of a
# given worker_type needs to stay assigned and not be replaced.
worker_config["shared_extra_conf"].update(shared_config)
shared_config = worker_config["shared_extra_conf"]
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
shared_config, worker_types_set, worker_name, worker_port
shared_config, worker_type, worker_name, worker_port
)
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
# Determine whether we need to load-balance this worker
if worker_type_total_count > 1:
# Create or add to a load-balanced upstream for this worker
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
# Upstreams are named after the worker_type
upstream = "http://" + worker_type
else:
upstream = "http://localhost:%d" % (worker_port,)
# Note that this endpoint should proxy to this upstream
nginx_locations[pattern] = upstream
# Write out the worker's logging config file
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Then a worker config file
@@ -827,10 +537,6 @@ def generate_worker_files(
worker_log_config_filepath=log_config_filepath,
)
# Save this worker's port number to the correct nginx upstreams
for worker_type in worker_types_set:
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
worker_port += 1
# Build the nginx location config blocks
@@ -843,14 +549,15 @@ def generate_worker_files(
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
body += " server localhost:%d;\n" % (port,)
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
upstream_worker_base_name=upstream_worker_base_name,
upstream_worker_type=upstream_worker_type,
body=body,
)
@@ -871,15 +578,7 @@ def generate_worker_files(
if reg_path.suffix.lower() in (".yaml", ".yml")
]
workers_in_use = len(requested_worker_types) > 0
# If there are workers, add the main process to the instance_map too.
if workers_in_use:
instance_map = shared_config.setdefault("instance_map", {})
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
workers_in_use = len(worker_types) > 0
# Shared homeserver config
convert(
@@ -975,34 +674,17 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
if not os.path.exists(config_path):
log("Generating base homeserver config")
generate_base_homeserver_config()
else:
log("Base homeserver config exists—not regenerating")
# This script may be run multiple times (mostly by Complement, see note at top of
# file). Don't re-configure workers in this instance.
# This script may be run multiple times (mostly by Complement, see note at top of file).
# Don't re-configure workers in this instance.
mark_filepath = "/conf/workers_have_been_configured"
if not os.path.exists(mark_filepath):
# Collect and validate worker_type requests
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
# Only process worker_types if they exist
if not worker_types_env:
# No workers, just the main process
worker_types = []
requested_worker_types: Dict[str, Any] = {}
else:
# Split type names by comma, ignoring whitespace.
worker_types = split_and_strip_string(worker_types_env, ",")
requested_worker_types = parse_worker_types(worker_types)
# Always regenerate all other config files
log("Generating worker config files")
generate_worker_files(environ, config_path, data_dir, requested_worker_types)
generate_worker_files(environ, config_path, data_dir)
# Mark workers as being configured
with open(mark_filepath, "w") as f:
f.write("")
else:
log("Worker config exists—not regenerating")
# Lifted right out of start.py
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)


@@ -57,7 +57,6 @@
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Event Reports](admin_api/event_reports.md)
- [Experimental Features](admin_api/experimental_features.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
- [Register Users](admin_api/register_api.md)
@@ -98,7 +97,6 @@
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
- [Internal Documentation](development/internal_documentation/README.md)
- [Single Sign-On]()
- [SAML](development/saml.md)


@@ -5,7 +5,7 @@ use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## Renew account


@@ -3,7 +3,7 @@
This API returns information about reported events.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
The api is:
```
@@ -169,17 +169,3 @@ The following fields are returned in the JSON response body:
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
have a canonical alias set.
* `event_json`: object - Details of the original event that was reported.
# Delete a specific event report
This API deletes a specific event report. If the request is successful, the response body
will be an empty JSON object.
The api is:
```
DELETE /_synapse/admin/v1/event_reports/<report_id>
```
**URL parameters:**
* `report_id`: string - The ID of the event report.
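A hedged `requests` sketch of calling the delete endpoint documented above; the homeserver URL, admin token and report ID are placeholders:

```python
import requests

BASE = "http://localhost:8008"                              # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder admin token

resp = requests.delete(f"{BASE}/_synapse/admin/v1/event_reports/2", headers=HEADERS)
resp.raise_for_status()
print(resp.json())  # an empty JSON object on success, per the text above
```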


@@ -1,55 +0,0 @@
# Experimental Features API
This API allows a server administrator to enable or disable some experimental features on a per-user
basis. The currently supported features are:
- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
presence state enabled
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
UIA when first uploading cross-signing keys.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
## Enabling/Disabling Features
This API allows a server administrator to enable experimental features for a given user. The request must
provide a body containing the user id and listing the features to enable/disable in the following format:
```json
{
"features": {
"msc3026":true,
"msc3881":true
}
}
```
where true is used to enable the feature, and false is used to disable the feature.
The API is:
```
PUT /_synapse/admin/v1/experimental_features/<user_id>
```
## Listing Enabled Features
To list which features are enabled/disabled for a given user send a request to the following API:
```
GET /_synapse/admin/v1/experimental_features/<user_id>
```
It will return a list of possible features and indicate whether they are enabled or disabled for the
user like so:
```json
{
"features": {
"msc3026": true,
"msc3881": false,
"msc3967": false
}
}
```
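Putting the two endpoints above together, a minimal sketch (placeholder homeserver, token and user ID) that enables two features and reads the result back:

```python
import requests

BASE = "http://localhost:8008"                              # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder admin token
USER = "@alice:example.com"                                 # placeholder user ID
URL = f"{BASE}/_synapse/admin/v1/experimental_features/{USER}"

requests.put(URL, headers=HEADERS, json={"features": {"msc3026": True, "msc3881": True}})
features = requests.get(URL, headers=HEADERS).json()
print(features)  # e.g. {'features': {'msc3026': True, 'msc3881': True, ...}}
```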


@@ -6,7 +6,7 @@ Details about the format of the `media_id` and storage of the media in the file
are documented under [media repository](../media_repository.md).
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## List all media in a room
@@ -235,14 +235,6 @@ The following fields are returned in the JSON response body:
Request:
```
POST /_synapse/admin/v1/media/delete?before_ts=<before_ts>
{}
```
*Deprecated in Synapse v1.78.0:* This API is available at the deprecated endpoint:
```
POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
@@ -251,7 +243,7 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`). *Deprecated in Synapse v1.78.0.*
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
Files that were last used before this timestamp will be deleted. It is the timestamp of
last access, not the timestamp when the file was created.
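Since `before_ts` is in milliseconds (a common trip-up), here is a hedged sketch computing a cutoff of thirty days ago and calling the non-deprecated endpoint shown above (homeserver and token are placeholders):

```python
import time

import requests

BASE = "http://localhost:8008"                              # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder admin token

# Unix timestamp in *milliseconds*: media last accessed before this is deleted.
before_ts = int((time.time() - 30 * 24 * 3600) * 1000)

resp = requests.post(
    f"{BASE}/_synapse/admin/v1/media/delete",
    params={"before_ts": before_ts},
    headers=HEADERS,
    json={},  # the endpoint takes an empty JSON body, as shown above
)
print(resp.json())
```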


@@ -11,7 +11,7 @@ Note that Synapse requires at least one message in each room, so it will never
delete the last message in a room.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:


@@ -6,7 +6,7 @@ local users. The server administrator must be in the room and have permission to
invite users.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## Parameters


@@ -5,7 +5,7 @@ server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
**Parameters**
@@ -400,7 +400,7 @@ sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).


@@ -4,7 +4,7 @@ Returns information about all local media usage of users. Gives the
possibility to filter them by time and user.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
@@ -81,52 +81,3 @@ The following fields are returned in the JSON response body:
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
* `next_token` - integer - Opaque value used for pagination. See above.
* `total` - integer - Total number of users after filtering.
# Get largest rooms by size in database
Returns the 10 largest rooms and an estimate of how much space in the database
they are taking.
This does not include the size of any associated media associated with the room.
Returns an error on SQLite.
*Note:* This uses the planner statistics from PostgreSQL to do the estimates,
which means that the returned information can vary widely from reality. However,
it should be enough to get a rough idea of where database disk space is going.
The API is:
```
GET /_synapse/admin/v1/statistics/database/rooms
```
A response body like the following is returned:
```json
{
"rooms": [
{
"room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
"estimated_size": 47325417353
}
],
}
```
**Response**
The following fields are returned in the JSON response body:
* `rooms` - An array of objects, sorted by largest room first. Objects contain
the following fields:
- `room_id` - string - The room ID.
- `estimated_size` - integer - Estimated disk space used in bytes by the room
in the database.
*Added in Synapse 1.83.0*
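A short sketch of querying the endpoint above and pretty-printing the estimates (placeholders as before; remember the figures come from PostgreSQL's planner statistics, so treat them as rough):

```python
import requests

BASE = "http://localhost:8008"                              # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder admin token

rooms = requests.get(
    f"{BASE}/_synapse/admin/v1/statistics/database/rooms", headers=HEADERS
).json()["rooms"]
for room in rooms:
    # estimated_size is in bytes; convert to GiB for readability.
    print(f"{room['room_id']}: ~{room['estimated_size'] / 2**30:.1f} GiB")
```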


@@ -1,7 +1,7 @@
# User Admin API
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
for a server admin: see [Admin API](../usage/administration/admin_api).
## Query User Account
@@ -62,7 +62,7 @@ URL parameters:
- `user_id`: fully-qualified user id: for example, `@user:server.com`.
## Create or modify account
## Create or modify Account
This API allows an administrator to create or modify a user account with a
specific `user_id`.
@@ -78,29 +78,28 @@ with a body of:
```json
{
"password": "user_password",
"logout_devices": false,
"displayname": "Alice Marigold",
"avatar_url": "mxc://example.com/abcde12345",
"displayname": "User",
"threepids": [
{
"medium": "email",
"address": "alice@example.com"
"address": "<user_mail_1>"
},
{
"medium": "email",
"address": "alice@domain.org"
"address": "<user_mail_2>"
}
],
"external_ids": [
{
"auth_provider": "example",
"external_id": "12345"
"auth_provider": "<provider1>",
"external_id": "<user_id_provider_1>"
},
{
"auth_provider": "example2",
"external_id": "abc54321"
"auth_provider": "<provider2>",
"external_id": "<user_id_provider_2>"
}
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false,
"user_type": null
@@ -113,51 +112,41 @@ Returns HTTP status code:
URL parameters:
- `user_id` - A fully-qualified user id. For example, `@user:server.com`.
- `user_id`: fully-qualified user id: for example, `@user:server.com`.
Body parameters:
- `password` - **string**, optional. If provided, the user's password is updated and all
- `password` - string, optional. If provided, the user's password is updated and all
devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
logged out even when `password` is provided.
- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name
will be removed.
- `avatar_url` - **string**, optional. Must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
If set to an empty string (`""`), the user's avatar is removed.
- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are
entirely replaced with the given list. Each item in the array is an object with the following
fields:
- `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number).
- `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or
`447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number
with country code "1") for `msisdn`.
Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove
that threepid from any identity servers it is aware has a binding for it.
- `external_ids` - **array**, optional. Allow setting the identifier of the external identity
provider for SSO (Single sign-on). More details are in the configuration manual under the
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
- `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
- `address` - string. Value of third-party ID.
belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
provider for SSO (Single sign-on). Details in the configuration manual under the
sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
- `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
provided value is not in the homeserver configuration.
- `external_id` - **string**, required. An identifier for the user in the external identity provider.
When the user logs in to the identity provider, this must be the unique ID that they map to.
- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
granting them access to the Admin API, among other things.
- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
in the homeserver configuration. Note that no error is raised if the provided
value is not in the homeserver configuration.
- `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
- `admin` - bool, optional, defaults to `false`.
- `deactivated` - bool, optional. If unspecified, deactivation state will be left
unchanged on existing accounts and set to `false` for new accounts.
A user cannot be erased by deactivating with this API. For details on
deactivating users see [Deactivate Account](#deactivate-account).
- `user_type` - string or null, optional. If provided, the user type will be
adjusted. If `null` given, the user type will be cleared. Other
allowed options are: `bot` and `support`.
Note: the `password` field must also be set if both of the following are true:
- `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
- Users are allowed to set their password on this homeserver (both `password_config.enabled` and
`password_config.localdb_enabled` config options are set to `true`).
Users' passwords are wiped upon account deactivation, hence the need to set a new one here.
If the user already exists then optional parameters default to the current value.
Note: a user cannot be erased with this API. For more details on
deactivating and erasing users see [Deactivate Account](#deactivate-account).
- `user_type` - **string** or null, optional. If not provided, the user type will be
not be changed. If `null` is given, the user type will be cleared.
Other allowed options are: `bot` and `support`.
In order to re-activate an account `deactivated` must be set to `false`. If
users do not login via single-sign-on, a new `password` must be provided.
## List Accounts
@@ -813,33 +802,6 @@ The following fields are returned in the JSON response body:
- `total` - Total number of user's devices.
### Create a device
Creates a new device for a specific `user_id` and `device_id`. Does nothing if the `device_id`
exists already.
The API is:
```
POST /_synapse/admin/v2/users/<user_id>/devices
{
"device_id": "QBUAZIFURK"
}
```
An empty JSON dict is returned.
**Parameters**
The following parameters should be set in the URL:
- `user_id` - fully qualified: for example, `@user:server.com`.
The following fields are required in the JSON request body:
- `device_id` - The device ID to create.
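And a matching sketch for the create-device endpoint above (placeholders throughout; `QBUAZIFURK` is just the example device ID from the text):

```python
import requests

BASE = "http://localhost:8008"                              # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder admin token
USER = "@alice:example.com"                                 # placeholder user ID

resp = requests.post(
    f"{BASE}/_synapse/admin/v2/users/{USER}/devices",
    headers=HEADERS,
    json={"device_id": "QBUAZIFURK"},
)
resp.raise_for_status()
print(resp.json())  # an empty JSON dict, per the text above
```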
### Delete multiple devices
Deletes the given devices for a specific `user_id`, and invalidates
any access token associated with them.


@@ -15,7 +15,6 @@ app_service_config_files:
The format of the AS configuration file is as follows:
```yaml
id: <your-AS-id>
url: <base url of AS>
as_token: <token AS will add to requests to HS>
hs_token: <token HS will add to requests to AS>


@@ -10,17 +10,26 @@ The necessary tools are:
- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and
- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions
on how to install the above tools and run the linters.
Install them with:
```sh
pip install -e ".[lint,mypy]"
```
The easiest way to run the lints is to invoke the linter script as follows.
```sh
scripts-dev/lint.sh
```
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
development workflow. It is not, however, recommended to run `mypy`
on save as it takes a while and can be very resource intensive.
development workflow. It is not, however, recommended to run `flake8` or `mypy`
on save as they take a while and can be very resource intensive.
## General rules

View File

@@ -73,15 +73,6 @@ It is also possible to do delegation using a SRV DNS record. However, that is ge
not recommended, as it can be difficult to configure the TLS certificates correctly in
this case, and it offers little advantage over `.well-known` delegation.
Please keep in mind that server delegation is a function of server-server communication,
and as such using SRV DNS records will not cover use cases involving client-server comms.
This means setting global client settings (such as a Jitsi endpoint, or disabling
creating new rooms as encrypted by default, etc) will still require that you serve a file
from the `https://<server_name>/.well-known/` endpoints defined in the spec! If you are
considering using SRV DNS delegation to avoid serving files from this endpoint, consider
the impact that you will not be able to change those client-based default values globally,
and will be relegated to the featureset of the configuration of each individual client.
However, if you really need it, you can find some documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).

View File

@@ -22,17 +22,15 @@ on Windows is not officially supported.
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough.
A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
# 3. Get the source.
@@ -53,11 +51,6 @@ can find many good git tutorials on the web.
# 4. Install the dependencies
Before installing the Python dependencies, make sure you have installed a recent version
of Rust (see the "What do I need?" section above). The easiest way of installing the
latest version is to use [rustup](https://rustup.rs/).
Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
and development environment. Once you have installed Python 3 and added the
source, you should install `poetry`.
@@ -72,7 +65,7 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Developing Synapse requires Poetry version 1.3.2 or later.
Synapse requires Poetry version 1.2.0 or later.
Next, open a terminal and install dependencies as follows:
@@ -81,39 +74,8 @@ cd path/where/you/have/cloned/the/repository
poetry install --extras all
```
This will install the runtime and developer dependencies for the project. Be sure to check
that the `poetry install` step completed cleanly.
This will install the runtime and developer dependencies for the project.
## Running Synapse via poetry
To start a local instance of Synapse in the locked poetry environment, create a config file:
```sh
cp docs/sample_config.yaml homeserver.yaml
cp docs/sample_log_config.yaml log_config.yaml
```
Now edit `homeserver.yaml`, things you might want to change include:
- Set a `server_name`
- Adjusting paths to be correct for your system like the `log_config` to point to the log config you just copied
- Using a [PostgreSQL database instead of SQLite](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database)
- Adding a [`registration_shared_secret`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration_shared_secret) so you can use [`register_new_matrix_user` command](https://matrix-org.github.io/synapse/latest/setup/installation.html#registering-a-user).
And then run Synapse with the following command:
```sh
poetry run python -m synapse.app.homeserver -c homeserver.yaml
```
If you get an error like the following:
```
importlib.metadata.PackageNotFoundError: matrix-synapse
```
this probably indicates that the `poetry install` step did not complete cleanly - go back and
resolve any issues and re-run until successful.
# 5. Get in touch.
@@ -142,8 +104,8 @@ regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
Synapse's code style is documented [here](../code_style.md). Please follow
it, including the conventions for [configuration
options and documentation](../code_style.md#configuration-code-and-documentation-format).
it, including the conventions for the [sample configuration
file](../code_style.md#configuration-file-format).
We welcome improvements and additions to our documentation itself! When
writing new pages, please
@@ -162,7 +124,7 @@ changes to the Rust code.
# 8. Test, test, test!
<a name="test-test-test" id="test-test-test"></a>
<a name="test-test-test"></a>
While you're developing and before submitting a patch, you'll
want to test your code.
@@ -368,8 +330,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
[here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -420,7 +380,7 @@ To prepare a Pull Request, please:
## Changelog
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by [Towncrier](https://github.com/twisted/towncrier).
entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the `changelog.d` directory named
in the format of `PRnumber.type`. The type can be one of the following:
@@ -462,7 +422,8 @@ chicken-and-egg problem.
There are two options for solving this:
1. Open the PR without a changelog file, see what number you got, and *then*
add the changelog file to your branch, or:
add the changelog file to your branch (see [Updating your pull
request](#updating-your-pull-request)), or:
1. Look at the [list of all
issues/PRs](https://github.com/matrix-org/synapse/issues?q=), add one to the

View File

@@ -155,11 +155,43 @@ def run_upgrade(
Boolean columns require special treatment, since SQLite treats booleans the
same as integers.
Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
There are three separate aspects to this:
* Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
`synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
the integer value from SQLite to a boolean before writing the value to the
postgres database.
* Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
supported. This makes it necessary to avoid using `TRUE` and `FALSE`
constants in SQL commands.
For example, to insert a `TRUE` value into the database, write:
```python
txn.execute("INSERT INTO tbl(col) VALUES (?)", (True, ))
```
* Default values for new boolean columns present a particular
difficulty. Generally it is best to create separate schema files for
Postgres and SQLite. For example:
```sql
-- in 00delta.sql.postgres:
ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT FALSE;
```
```sql
-- in 00delta.sql.sqlite:
ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT 0;
```
Note that there is a particularly insidious failure mode here: the Postgres
flavour will be accepted by SQLite 3.22, but will give a column whose
default value is the **string** `"FALSE"` - which, when cast back to a boolean
in Python, evaluates to `True`.
## `event_id` global uniqueness

View File

@@ -2,13 +2,6 @@
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
# Installing
See the [contributing guide](contributing_guide.md#4-install-the-dependencies).
Developers should use Poetry 1.3.2 or higher. If you encounter problems related
to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version).
# Background
Synapse uses a variety of third-party Python packages to function as a homeserver.
@@ -130,7 +123,7 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
## ...reset my venv to the locked environment?
```shell
poetry install --all-extras --sync
poetry install --extras all --remove-untracked
```
## ...delete everything and start over from scratch?
@@ -190,6 +183,7 @@ Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
`--extras` and `--optional` flags in particular.
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
@@ -202,7 +196,7 @@ poetry remove packagename
```
ought to do the trick. Alternatively, manually update `pyproject.toml` and
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
`poetry lock --no-update`. Include the updated `pyproject.toml` and poetry.lock`
files in your commit.
## ...update the version range for an existing dependency?
@@ -246,6 +240,9 @@ poetry export --extras all
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
be required.
## ...build a test wheel?
I usually use
@@ -258,28 +255,12 @@ because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.
## ...handle a Dependabot pull request?
Synapse uses Dependabot to keep the `poetry.lock` and `Cargo.lock` file
up-to-date with the latest releases of our dependencies. The changelog check is
omitted for Dependabot PRs; the release script will include them in the
changelog.
When reviewing a dependabot PR, ensure that:
* the lockfile changes look reasonable;
* the upstream changelog file (linked in the description) doesn't include any
breaking changes;
* continuous integration passes.
In particular, any updates to the type hints (usually packages which start with `types-`)
should be safe to merge if linting passes.
# Troubleshooting
## Check the version of poetry with `poetry --version`.
The minimum version of poetry supported by Synapse is 1.3.2.
The minimum version of poetry supported by Synapse is 1.2.
It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep

View File

@@ -1,375 +0,0 @@
# How do faster joins work?
This is a work-in-progress set of notes with two goals:
- act as a reference, explaining how Synapse implements faster joins; and
- record the rationale behind our choices.
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.
## Overview: processing events in a partially-joined room
The response to a partial join consists of
- the requested join event `J`,
- a list of the servers in the room (according to the state before `J`),
- a subset of the state of the room before `J`,
- the full auth chain of that state subset.
Synapse marks the room as partially joined by adding a row to the database table
`partial_state_rooms`. It also marks the join event `J` as "partially stated",
meaning that we have neither received nor computed the full state before/after
`J`. This is done by adding a row to `partial_state_events`.
<details><summary>DB schema</summary>
```
matrix=> \d partial_state_events
Table "matrix.partial_state_events"
Column │ Type │ Collation │ Nullable │ Default
══════════╪══════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
event_id │ text │ │ not null │
matrix=> \d partial_state_rooms
Table "matrix.partial_state_rooms"
Column │ Type │ Collation │ Nullable │ Default
════════════════════════╪════════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
device_lists_stream_id │ bigint │ │ not null │ 0
join_event_id │ text │ │ │
joined_via │ text │ │ │
matrix=> \d partial_state_rooms_servers
Table "matrix.partial_state_rooms_servers"
Column │ Type │ Collation │ Nullable │ Default
═════════════╪══════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
server_name │ text │ │ not null │
```
Indices, foreign-keys and check constraints are omitted for brevity.
</details>
While partially joined to a room, Synapse receives events `E` from remote
homeservers as normal, and can create events at the request of its local users.
However, we run into trouble when we enforce the [checks on an event].
> 1. Is a valid event, otherwise it is dropped. For an event to be valid, it
>    must contain a room_id, and it must comply with the event format of that
>    room version.
> 2. Passes signature checks, otherwise it is dropped.
> 3. Passes hash checks, otherwise it is redacted before being processed further.
> 4. Passes authorization rules based on the event's auth events, otherwise it
> is rejected.
> 5. **Passes authorization rules based on the state before the event, otherwise
> it is rejected.**
> 6. **Passes authorization rules based on the current state of the room,
> otherwise it is “soft failed”.**
[checks on an event]: https://spec.matrix.org/v1.5/server-server-api/#checks-performed-on-receipt-of-a-pdu
We can enforce checks 1--4 without any problems.
But we cannot enforce checks 5 or 6 with complete certainty, since Synapse does
not know the full state before `E`, nor the current state of the room.
### Partial state
Instead, we make a best-effort approximation.
While the room is considered partially joined, Synapse tracks the "partial
state" before events.
This works in a similar way as regular state:
- The partial state before `J` is that given to us by the partial join response.
- The partial state before an event `E` is the resolution of the partial states
after each of `E`'s `prev_event`s.
- If `E` is rejected or a message event, the partial state after `E` is the
partial state before `E`.
- Otherwise, the partial state after `E` is the partial state before `E`, plus
`E` itself.
More concisely, partial state propagates just like full state; the only
difference is that we "seed" it with an incomplete initial state.
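As a rough illustration, the propagation rule could be sketched like this in Python. Everything here is hypothetical scaffolding: `resolve` stands in for the room version's state resolution algorithm, and `Event` is a toy stand-in for Synapse's event objects.

```python
from dataclasses import dataclass, field

StateMap = dict[tuple[str, str], str]  # (type, state_key) -> event_id

@dataclass
class Event:
    event_id: str
    type: str
    state_key: str | None = None
    prev_events: list["Event"] = field(default_factory=list)
    rejected: bool = False

def resolve(states: list[StateMap]) -> StateMap:
    """Placeholder for the room version's state resolution algorithm."""
    merged: StateMap = {}
    for s in states:
        merged.update(s)
    return merged

def partial_state_after(e: Event, seed: StateMap) -> StateMap:
    """Partial state after `e`; `seed` is the state subset from the
    partial join response, used for the join event itself."""
    if not e.prev_events:
        # The join event J: its state-before comes from the join response.
        before = seed
    else:
        before = resolve([partial_state_after(p, seed) for p in e.prev_events])
    if e.rejected or e.state_key is None:
        # Rejected events and message events leave the state unchanged.
        return before
    return {**before, (e.type, e.state_key): e.event_id}
```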
Synapse records that we have only calculated partial state for this event with
a row in `partial_state_events`.
While the room remains partially stated, check 5 on incoming events to that
room becomes:
> 5. Passes authorization rules based on **the resolution between the partial
> state before `E` and `E`'s auth events.** If the event fails to pass
> authorization rules, it is rejected.
Additionally, check 6 is deleted: no soft-failures are enforced.
While partially joined, the current partial state of the room is defined as the
resolution across the partial states after all forward extremities in the room.
_Remark._ Events with partial state are _not_ considered
[outliers](../room-dag-concepts.md#outliers).
### Approximation error
Using partial state means the auth checks can fail in a few different ways[^2].
[^2]: Is this exhaustive?
- We may erroneously accept an incoming event in check 5 based on partial state
when it would have been rejected based on full state, or vice versa.
- This means that an event could erroneously be added to the current partial
state of the room when it would not be present in the full state of the room,
or vice versa.
- Additionally, we may have skipped soft-failing an event that would have been
soft-failed based on full state.
(Note that the discrepancies described in the last two bullets are user-visible.)
This means that we have to be very careful when we want to lookup pieces of room
state in a partially-joined room. Our approximation of the state may be
incorrect or missing. But we can make some educated guesses. If
- our partial state is likely to be correct, or
- the consequences of our partial state being incorrect are minor,
then we proceed as normal, and let the resync process fix up any mistakes (see
below).
When is our partial state likely to be correct?
- It's more accurate the closer we are to the partial join event. (So we should
ideally complete the resync as soon as possible.)
- Non-member events: we will have received them as part of the partial join
response, if they were part of the room state at that point. We may
incorrectly accept or reject updates to that state (at first because we lack
remote membership information; later because of compounding errors), so these
can become incorrect over time.
- Local members' memberships: we are the only ones who can create join and
knock events for our users. We can't be completely confident in the
correctness of bans, invites and kicks from other homeservers, but the resync
process should correct any mistakes.
- Remote members' memberships: we did not receive these in the /send_join
response, so we have essentially no idea if these are correct or not.
In short, we deem it acceptable to trust the partial state for non-membership
and local membership events. For remote membership events, we wait for the
resync to complete, at which point we have the full state of the room and can
proceed as normal.
### Fixing the approximation with a resync
The partial-state approximation is only a temporary affair. In the background,
Synapse begins a "resync" process. This is a continuous loop, starting at the
partial join event and proceeding downwards through the event graph. For each
`E` seen in the room since partial join, Synapse will fetch
- the event ids in the state of the room before `E`, via
[`/state_ids`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1state_idsroomid);
- the event ids in the full auth chain of `E`, included in the `/state_ids`
response; and
- any events from the previous two bullets that Synapse hasn't persisted, via
[`/state`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1stateroomid).
This means Synapse has (or can compute) the full state before `E`, which allows
Synapse to properly authorise or reject `E`. At this point, the event
is considered to have "full state" rather than "partial state". We record this
by removing `E` from the `partial_state_events` table.
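In outline, the resync loop might look like the following sketch; every helper named here is hypothetical and merely stands in for Synapse's federation machinery.

```python
# Hypothetical outline of the resync loop; none of these helpers are
# Synapse's real API.
async def resync_partial_room(room_id: str) -> None:
    # Walk the event graph from the partial join event onwards.
    for event in await events_since_partial_join(room_id):
        # /state_ids gives the event ids of the state before `event`,
        # plus the ids of its full auth chain.
        state_ids, auth_chain_ids = await fetch_state_ids(room_id, event.event_id)

        # Fetch any of those events we haven't already persisted, via /state.
        await fetch_missing_events(room_id, state_ids + auth_chain_ids)

        # We now have the full state before `event`, so we can re-run
        # check 5 properly, then mark the event as fully stated.
        await reauthorise(event)
        await remove_from_partial_state_events(event)

    # Every event since the join is now fully stated: the resync is done.
    await remove_from_partial_state_rooms(room_id)
```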
\[**TODO:** Does Synapse persist a new state group for the full state
before `E`, or do we alter the (partial-)state group in-place? Are state groups
ever marked as partially-stated? \]
This scheme means it is possible for us to have accepted and sent an event to
clients, only to reject it during the resync. From a client's perspective, the
effect is similar to a retroactive
state change due to state resolution---i.e. a "state reset".[^3]
[^3]: Clients should refresh caches to detect such a change. Rumour has it that
sliding sync will fix this.
When all events since the join `J` have been fully-stated, the room resync
process is complete. We record this by removing the room from
`partial_state_rooms`.
## Faster joins on workers
For the time being, the resync process happens on the master worker.
A new replication stream `un_partial_stated_room` is added. Whenever a resync
completes and a partial-state room becomes fully stated, a new message is sent
into that stream containing the room ID.
## Notes on specific cases
> **NB.** The notes below are rough. Some of them are hidden under `<details>`
disclosures because they have yet to be implemented in mainline Synapse.
### Creating events during a partial join
When sending out messages during a partial join, we assume our partial state is
accurate and proceed as normal. For this to have any hope of succeeding at all,
our partial state must contain an entry for each of the (type, state key) pairs
[specified by the auth rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules):
- `m.room.create`
- `m.room.join_rules`
- `m.room.power_levels`
- `m.room.third_party_invite`
- `m.room.member`
The first four of these should be present in the state before `J` that is given
to us in the partial join response; only membership events are omitted. In order
for us to consider the user joined, we must have their membership event. That
means the only possible omission is the target's membership in an invite, kick
or ban.
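For illustration, the (type, state key) pairs consulted when authorising an event could be computed roughly as follows; this is a simplified reading of the spec's auth rules (third-party-invite handling omitted), not Synapse's actual implementation.

```python
def auth_state_keys(event_type: str, sender: str,
                    target_state_key: str | None = None) -> set[tuple[str, str]]:
    """The (type, state_key) pairs whose state entries are consulted when
    authorising an event, per a simplified reading of the auth rules."""
    keys = {
        ("m.room.create", ""),
        ("m.room.join_rules", ""),
        ("m.room.power_levels", ""),
        ("m.room.member", sender),  # the sender's own membership
    }
    if event_type == "m.room.member" and target_state_key is not None:
        # Membership changes also need the target's current membership:
        # the one entry a partial join response may be missing.
        keys.add(("m.room.member", target_state_key))
    return keys
```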
The worst possibility is that we locally invite someone who is banned according to
the full state, because we lack their ban in our current partial state. The rest
of the federation---at least, those who are fully joined---should correctly
enforce the [membership transition constraints](
https://spec.matrix.org/v1.3/client-server-api/#room-membership
). So the erroneous invite should be ignored by fully-joined
homeservers and resolved by the resync for partially-joined homeservers.
In more generality, there are two problems we're worrying about here:
- We might create an event that is valid under our partial state, only to later
find out that is actually invalid according to the full state.
- Or: we might refuse to create an event that is invalid under our partial
state, even though it would be perfectly valid under the full state.
However, we expect such problems to be unlikely in practice, because
- We trust that the room has sensible power levels, e.g. that bad actors with
high power levels are demoted before their ban.
- We trust that the resident server provides us up-to-date power levels, join
rules, etc.
- State changes in rooms are relatively infrequent, and the resync period is
relatively quick.
#### Sending out the event over federation
**TODO:** needs prose fleshing out.
Normally: send out in a fed txn to all HSes in the room.
We only know that some HSes were in the room at some point. Wat do.
Send it out to the list of servers from the first join.
**TODO** what do we do here if we have full state?
If the prev event was created by us, we can risk sending it to the wrong HS. (Motivation: privacy concern of the content. Not such a big deal for a public room or an encrypted room. But non-encrypted invite-only...)
But don't want to send out sensitive data in other HS's events in this way.
Suppose we discover after resync that we shouldn't have sent out one of our events (not a prev_event) to a target HS. Not much we can do.
What if we didn't send them an event but should have?
E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them.
Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left.
Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?)
- Don't do this currently.
- If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages
- Gap: rooms which are infrequently used and take a long time to resync.
### Joining after a partial join
**NB.** Not yet implemented.
<details>
**TODO:** needs prose fleshing out. Liaise with Matthieu. Explain why /send_join
(Rich was surprised we didn't just create it locally. Answer: to try and avoid
a join which then gets rejected after resync.)
We don't know for sure that any join we create would be accepted.
E.g. the joined user might have been banned; the join rules might have changed in a way that we didn't realise... some way in which the partial state was mistaken.
Instead, do another partial make-join/send-join handshake to confirm that the join works.
- Probably going to get a bunch of duplicate state events and auth events.... but the point of partial joins is that these should be small. Many are already persisted = good.
- What if the second send_join response includes a different list of resident HSes? Could ignore it.
- Could even have a special flag that says "just make me a join", i.e. don't bother giving me state or servers in room. Deffo want the auth chain tho.
- SQ: wrt device lists it's a lot safer to ignore it!!!!!
- What if the state at the second join is inconsistent with what we have? Ignore it?
</details>
### Leaving (and kicks and bans) after a partial join
**NB.** Not yet implemented.
<details>
When you're fully joined to a room, to have `U` leave a room their homeserver
needs to
- create a new leave event for `U` which will be accepted by other homeservers,
and
- send that event out to the homeservers in the federation.
When is a leave event accepted? See
[v10 auth rules](https://spec.matrix.org/v1.5/rooms/v10/#authorization-rules):
> 4. If type is m.room.member: [...]
>
> 5. If membership is leave:
>
> 1. If the sender matches state_key, allow if and only if that user's current membership state is invite, join, or knock.
> 2. [...]
I think this means that (well-formed!) self-leaves are governed entirely by
4.5.1. This means that if we correctly calculate state which says that `U` is
invited, joined or knocked and include it in the leave's auth events, our event
is accepted by checks 4 and 5 on incoming events.
> 4. Passes authorization rules based on the event's auth events, otherwise
> it is rejected.
> 5. Passes authorization rules based on the state before the event, otherwise
> it is rejected.
The only way to fail check 6 is if the receiving server's current state of the
room says that `U` is banned, has left, or has no membership event. But this is
fine: the receiving server already thinks that `U` isn't in the room.
> 6. Passes authorization rules based on the current state of the room,
> otherwise it is “soft failed”.
For the second point (publishing the leave event), the best thing we can do is
publish to all HSes we know to be currently in the room. If they miss that
event, they might send us traffic in the room that we don't care about. This is
a problem with leaving after a "full" join; we don't seek to fix this with
partial joins.
(With that said: there's nothing machine-readable in the /send response. I don't
think we can deduce "destination has left the room" from a failure to /send an
event into that room?)
#### Can we still do this during a partial join?
We can create leave events and can choose what gets included in our auth events,
so we can be sure that we pass check 4 on incoming events. For check 5, we might
have an incorrect view of the state before an event.
The only way we might erroneously think a leave is valid is if
- the partial state before the leave has `U` joined, invited or knocked, but
- the full state before the leave has `U` banned, left or not present,
in which case the leave doesn't make anything worse: other HSes already consider
us as not in the room, and will continue to do so after seeing the leave.
The remaining obstacle is then: can we safely broadcast the leave event? We may
miss servers or incorrectly think that a server is in the room. Or the
destination server may be offline and miss the transaction containing our leave
event. This should self-heal when they see an event whose `prev_events` descends
from our leave.
Another option we considered was to use federation `/send_leave` to ask a
fully-joined server to send out the event on our behalf. But that introduces
complexity without much benefit. Besides, as Rich put it,
> sending out leaves is pretty best-effort currently
so this is probably good enough as-is.
#### Cleanup after the last leave
**TODO**: what cleanup is necessary? Is it all just nice-to-have to save unused
work?
</details>

View File

@@ -46,9 +46,6 @@ instead.
If the authentication is unsuccessful, the module must return `None`.
Note that the user is not automatically registered; the `register_user(..)` method of
the [module API](writing_a_module.html) can be used to lazily create users.
If multiple modules register an auth checker for the same login type but with different
fields, Synapse will refuse to start.
@@ -106,9 +103,6 @@ Called during a logout request for a user. It is passed the qualified user ID, t
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
to still be present.
If multiple modules implement this callback, Synapse runs them all in order.
### `get_username_for_registration`

View File

@@ -307,8 +307,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
```python
async def check_media_file_for_spam(
file_wrapper: "synapse.media.media_storage.ReadableFileWrapper",
file_info: "synapse.media._base.FileInfo",
file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
file_info: "synapse.rest.media.v1._base.FileInfo",
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```

View File

@@ -146,9 +146,6 @@ Note that this callback is called when the event has already been processed and
into the room, which means this callback cannot be used to deny persisting the event. To
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
For any given event, this callback will be called on every worker process, even if that worker will not end up
acting on that event. This callback will not be called for events that are marked as rejected.
If multiple modules implement this callback, Synapse runs them all in order.
### `check_can_shutdown_room`
@@ -254,11 +251,6 @@ If multiple modules implement this callback, Synapse runs them all in order.
_First introduced in Synapse v1.56.0_
**<span style="color:red">
This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which
features the same functionality. The only difference is in name.
</span>**
```python
async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
```
@@ -273,44 +265,6 @@ server_.
If multiple modules implement this callback, Synapse runs them all in order.
### `on_add_user_third_party_identifier`
_First introduced in Synapse v1.79.0_
```python
async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
```
Called after successfully creating an association between a user and a third-party identifier
(email address, phone number). The module is given the Matrix ID of the user the
association is for, as well as the medium (`email` or `msisdn`) and address of the
third-party identifier (e.g. an email address).
Note that this callback is _not_ called if a user attempts to bind their third-party identifier
to an identity server (via a call to [`POST
/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)).
If multiple modules implement this callback, Synapse runs them all in order.
### `on_remove_user_third_party_identifier`
_First introduced in Synapse v1.79.0_
```python
async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
```
Called after successfully removing an association between a user and a third-party identifier
(email address, phone number). The module is given the Matrix ID of the user the
association is for, as well as the medium (`email` or `msisdn`) and address of the
third-party identifier (e.g. an email address).
Note that this callback is _not_ called if a user attempts to unbind their third-party
identifier from an identity server (via a call to [`POST
/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)).
If multiple modules implement this callback, Synapse runs them all in order.
## Example
The example below is a module that implements the third-party rules callback
@@ -343,4 +297,4 @@ class EventCensorer:
)
event_dict["content"] = new_event_content
return event_dict
```
```

View File

@@ -59,8 +59,8 @@ namespace (such as anything under `/_matrix/client` for example). It is strongly
recommended that modules register their web resources under the `/_synapse/client`
namespace.
The provided resource is a Python class that implements Twisted's [IResource](https://docs.twistedmatrix.com/en/stable/api/twisted.web.resource.IResource.html)
interface (such as [Resource](https://docs.twistedmatrix.com/en/stable/api/twisted.web.resource.Resource.html)).
The provided resource is a Python class that implements Twisted's [IResource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.IResource.html)
interface (such as [Resource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.Resource.html)).
Only one resource can be registered for a given path. If several modules attempt to
register a resource for the same path, the module that appears first in Synapse's
@@ -82,60 +82,4 @@ the callback name as the argument name and the function as its value. A
`register_[...]_callbacks` method exists for each category.
Callbacks for each category can be found on their respective page of the
[Synapse documentation website](https://matrix-org.github.io/synapse).
## Caching
_Added in Synapse 1.74.0._
Modules can leverage Synapse's caching tools to manage their own cached functions. This
can be helpful for modules that need to repeatedly request the same data from the database
or a remote service.
Functions that need to be wrapped with a cache need to be decorated with a `@cached()`
decorator (which can be imported from `synapse.module_api`) and registered with the
[`ModuleApi.register_cached_function`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L888)
API when initialising the module. If the module needs to invalidate an entry in a cache,
it needs to use the [`ModuleApi.invalidate_cache`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L904)
API, with the function to invalidate the cache of and the key(s) of the entry to
invalidate.
Below is an example of a simple module using a cached function:
```python
from typing import Any

from synapse.module_api import cached, ModuleApi


class MyModule:
    def __init__(self, config: Any, api: ModuleApi):
        self.api = api

        # Register the cached function so Synapse knows how to correctly
        # invalidate entries for it.
        self.api.register_cached_function(self.get_department_for_user)

    @cached()
    async def get_department_for_user(self, user_id: str) -> str:
        """A function with a cache."""
        # Request a department from an external service, via the module API's
        # HTTP client.
        response = await self.api.http_client.get_json(
            "https://int.example.com/users", {"user_id": user_id}
        )
        return response["department"]

    async def do_something_with_users(self) -> None:
        """Calls the cached function and then invalidates an entry in its cache."""
        user_id = "@alice:example.com"

        # Get the user's department. Since get_department_for_user is wrapped
        # with a cache, the return value for this user_id will be cached.
        department = await self.get_department_for_user(user_id)

        # Do something with `department`...

        # Let's say something has changed with our user, and the entry we have
        # for them in the cache is out of date, so we want to invalidate it.
        await self.api.invalidate_cache(self.get_department_for_user, (user_id,))
```
See the [`cached` docstring](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L190) for more details.
[Synapse documentation website](https://matrix-org.github.io/synapse).

View File

@@ -88,41 +88,98 @@ oidc_providers:
display_name_template: "{{ user.name }}"
```
### Apple
### Dex
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
[Dex][dex-idp] is a simple, open-source OpenID Connect Provider.
Although it is designed to help building a full-blown provider with an
external database, it can be configured with static passwords in a config file.
You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
to install Dex.
As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
has more information on setting up SiWA.
The synapse config will look like this:
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
```yaml
- idp_id: apple
idp_name: Apple
issuer: "https://appleid.apple.com"
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
client_auth_method: "client_secret_post"
client_secret_jwt_key:
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
jwt_header:
alg: ES256
kid: "KEYIDCODE" # Set to the 10-char Key ID
jwt_payload:
iss: TEAMIDCODE # Set to the 10-char Team ID
scopes: ["name", "email", "openid"]
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
staticClients:
- id: synapse
secret: secret
redirectURIs:
- '[synapse public baseurl]/_synapse/client/oidc/callback'
name: 'Synapse'
```
Run with `dex serve examples/config-dev.yaml`.
Synapse config:
```yaml
oidc_providers:
- idp_id: dex
idp_name: "My Dex server"
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
email_template: "{{ user.email }}"
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
```
### Keycloak
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
Keycloak supports OIDC Back-Channel Logout, which sends logout notification to Synapse, so that Synapse users get logged out when they log out from Keycloak.
This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
2. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Client Protocol | `openid-connect` |
3. Click `Save`
4. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Enabled | `On` |
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
| Field | Value |
|-------|-------|
| Client Authenticator | `Client ID and Secret` |
7. Click `Regenerate Secret`
8. Copy Secret
```yaml
oidc_providers:
- idp_id: keycloak
idp_name: "My KeyCloak server"
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
client_id: "synapse"
client_secret: "copy secret generated from above"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
backchannel_logout_enabled: true # Optional
```
### Auth0
@@ -205,43 +262,285 @@ oidc_providers:
display_name_template: "{{ user.preferred_username|capitalize }}" # TO BE FILLED: If your users have names in Authentik and you want those in Synapse, this should be replaced with user.name|capitalize.
```
### Dex
### LemonLDAP
[Dex][dex-idp] is a simple, open-source OpenID Connect Provider.
Although it is designed to help building a full-blown provider with an
external database, it can be configured with static passwords in a config file.
[LemonLDAP::NG][lemonldap] is an open-source IdP solution.
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
to install Dex.
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
1. Create an OpenID Connect Relying Party in LemonLDAP::NG
2. The parameters are:
- Client ID under the basic menu of the new Relying Parties (`Options > Basic >
Client ID`)
- Client secret (`Options > Basic > Client secret`)
- JWT Algorithm: RS256 within the security menu of the new Relying Parties
(`Options > Security > ID Token signature algorithm` and `Options > Security >
Access Token signature algorithm`)
- Scopes: OpenID, Email and Profile
- Allowed redirection addresses for login (`Options > Basic > Allowed
redirection addresses for login`):
`[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
staticClients:
- id: synapse
secret: secret
redirectURIs:
- '[synapse public baseurl]/_synapse/client/oidc/callback'
name: 'Synapse'
oidc_providers:
- idp_id: lemonldap
idp_name: lemonldap
discover: true
issuer: "https://auth.example.org/" # TO BE FILLED: replace with your domain
client_id: "your client id" # TO BE FILLED
client_secret: "your client secret" # TO BE FILLED
scopes:
- "openid"
- "profile"
- "email"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
# TO BE FILLED: If your users have names in LemonLDAP::NG and you want those in Synapse, this should be replaced with user.name|capitalize or any valid filter.
display_name_template: "{{ user.preferred_username|capitalize }}"
```
Run with `dex serve examples/config-dev.yaml`.
### GitHub
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
Synapse config:
```yaml
oidc_providers:
- idp_id: dex
idp_name: "My Dex server"
skip_verification: true # This is needed as Dex is served on an insecure endpoint
issuer: "http://127.0.0.1:5556/dex"
client_id: "synapse"
client_secret: "secret"
scopes: ["openid", "profile"]
- idp_id: github
idp_name: Github
idp_brand: "github" # optional: styling hint for clients
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
localpart_template: "{{ user.name }}"
display_name_template: "{{ user.name|capitalize }}"
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
```
### Google
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
1. Set up a project in the Google API Console (see
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
2. Add an "OAuth Client ID" for a Web Application under "Credentials".
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_providers:
- idp_id: google
idp_name: Google
idp_brand: "google" # optional: styling hint for clients
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile", "email"] # email is optional, read below
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
### Twitch
1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: twitch
idp_name: Twitch
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### GitLab
1. Create a [new application](https://gitlab.com/profile/applications).
2. Add the `read_user` and `openid` scopes.
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitlab
idp_name: Gitlab
idp_brand: "gitlab" # optional: styling hint for clients
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
scopes: ["openid", "read_user"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: '{{ user.nickname }}'
display_name_template: '{{ user.name }}'
```
### Facebook
0. You will need a Facebook developer account. You can register for one
[here](https://developers.facebook.com/async/registration/).
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
console, "Create App", and choose "Build Connected Experiences".
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
need to go through the whole form here.
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
URL.
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
and "App Secret" for use below.
Synapse config:
```yaml
- idp_id: facebook
idp_name: Facebook
idp_brand: "facebook" # optional: styling hint for clients
discover: false
issuer: "https://www.facebook.com"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "email"]
authorization_endpoint: "https://facebook.com/dialog/oauth"
token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
user_mapping_provider:
config:
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
Relevant documents:
* [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
* [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
* [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
but it has a `response_types_supported` which excludes "code" (which we rely on, and
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.
### Gitea
Gitea is, like Github, not an OpenID provider, but just an OAuth2 provider.
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new application.
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitea
idp_name: Gitea
discover: false
issuer: "https://your-gitea.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: client_secret_post
scopes: [] # Gitea doesn't support Scopes
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.full_name }}"
```
### XWiki
Install [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
Synapse config:
```yaml
oidc_providers:
- idp_id: xwiki
idp_name: "XWiki"
issuer: "https://myxwikihost/xwiki/oidc/"
client_id: "your-client-id" # TO BE FILLED
client_auth_method: none
scopes: ["openid", "profile"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### Apple
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.
As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
has more information on setting up SiWA.
The synapse config will look like this:
```yaml
- idp_id: apple
idp_name: Apple
issuer: "https://appleid.apple.com"
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
client_auth_method: "client_secret_post"
client_secret_jwt_key:
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
jwt_header:
alg: ES256
kid: "KEYIDCODE" # Set to the 10-char Key ID
jwt_payload:
iss: TEAMIDCODE # Set to the 10-char Team ID
scopes: ["name", "email", "openid"]
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
user_mapping_provider:
config:
email_template: "{{ user.email }}"
```
### Django OAuth Toolkit
@@ -292,263 +591,6 @@ oidc_providers:
email_template: "{{ user.email }}"
```
### Facebook
0. You will need a Facebook developer account. You can register for one
[here](https://developers.facebook.com/async/registration/).
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
console, "Create App", and choose "Build Connected Experiences".
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
need to go through the whole form here.
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
URL.
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
and "App Secret" for use below.
Synapse config:
```yaml
- idp_id: facebook
idp_name: Facebook
idp_brand: "facebook" # optional: styling hint for clients
discover: false
issuer: "https://www.facebook.com"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "email"]
authorization_endpoint: "https://facebook.com/dialog/oauth"
token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token"
jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/"
user_mapping_provider:
config:
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
Relevant documents:
* [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow)
* [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/)
* [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user)
Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration),
but it has a `response_types_supported` which excludes "code" (which we rely on, and
is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)),
so we have to disable discovery and configure the URIs manually.
### GitHub
[GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but
just a regular OAuth2 provider.
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
Synapse config:
```yaml
oidc_providers:
- idp_id: github
idp_name: Github
idp_brand: "github" # optional: styling hint for clients
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
authorization_endpoint: "https://github.com/login/oauth/authorize"
token_endpoint: "https://github.com/login/oauth/access_token"
userinfo_endpoint: "https://api.github.com/user"
scopes: ["read:user"]
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.name }}"
```
### GitLab
1. Create a [new application](https://gitlab.com/profile/applications).
2. Add the `read_user` and `openid` scopes.
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitlab
idp_name: GitLab
idp_brand: "gitlab" # optional: styling hint for clients
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
scopes: ["openid", "read_user"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: '{{ user.nickname }}'
display_name_template: '{{ user.name }}'
```
### Gitea
Gitea is, like GitHub, not an OpenID Connect provider, but just an OAuth2 provider.
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.
1. Create a new application.
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: gitea
idp_name: Gitea
discover: false
issuer: "https://your-gitea.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: client_secret_post
scopes: [] # Gitea doesn't support scopes
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
user_mapping_provider:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.full_name }}"
```
### Google
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
1. Set up a project in the Google API Console (see
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
4. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_providers:
- idp_id: google
idp_name: Google
idp_brand: "google" # optional: styling hint for clients
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
scopes: ["openid", "profile", "email"] # email is optional, read below
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
### Keycloak
[Keycloak][keycloak-idp] is an open-source IdP maintained by Red Hat.
Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse so that Synapse users are logged out when they log out of Keycloak.
This can optionally be enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
Follow the [Getting Started Guide](https://www.keycloak.org/guides) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
2. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Client Protocol | `openid-connect` |
3. Click `Save`
4. Fill in the fields as below:
| Field | Value |
|-----------|-----------|
| Client ID | `synapse` |
| Enabled | `On` |
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
| Field | Value |
|-------|-------|
| Client Authenticator | `Client ID and Secret` |
7. Click `Regenerate Secret`
8. Copy Secret
```yaml
oidc_providers:
- idp_id: keycloak
idp_name: "My KeyCloak server"
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
client_id: "synapse"
client_secret: "copy secret generated from above"
scopes: ["openid", "profile"]
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
backchannel_logout_enabled: true # Optional
```
### LemonLDAP
[LemonLDAP::NG][lemonldap] is an open-source IdP solution.
1. Create an OpenID Connect Relying Party in LemonLDAP::NG
2. The parameters are:
- Client ID under the basic menu of the new Relying Parties (`Options > Basic >
Client ID`)
- Client secret (`Options > Basic > Client secret`)
- JWT Algorithm: RS256 within the security menu of the new Relying Parties
(`Options > Security > ID Token signature algorithm` and `Options > Security >
Access Token signature algorithm`)
- Scopes: OpenID, Email and Profile
- Allowed redirection addresses for login (`Options > Basic > Allowed
redirection addresses for login`):
`[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: lemonldap
idp_name: lemonldap
discover: true
issuer: "https://auth.example.org/" # TO BE FILLED: replace with your domain
client_id: "your client id" # TO BE FILLED
client_secret: "your client secret" # TO BE FILLED
scopes:
- "openid"
- "profile"
- "email"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}}"
# TO BE FILLED: If your users have names in LemonLDAP::NG and you want those in Synapse, this should be replaced with user.name|capitalize or any valid filter.
display_name_template: "{{ user.preferred_username|capitalize }}"
```
### Mastodon
[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.
@@ -569,7 +611,7 @@ You should receive a response similar to the following. Make sure to save it.
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. Your Synapse configuration should include the following:
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
```yaml
oidc_providers:
@@ -585,128 +627,7 @@ oidc_providers:
scopes: ["read"]
user_mapping_provider:
config:
subject_template: "{{ user.id }}"
localpart_template: "{{ user.username }}"
display_name_template: "{{ user.display_name }}"
subject_claim: "id"
```
Note that the fields `client_id` and `client_secret` are taken from the curl response above.
### Shibboleth with OIDC Plugin
[Shibboleth](https://www.shibboleth.net/) is an open-standard IdP solution widely used by universities.
1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly.
2. Create a new config on the IdP side, ensuring that the `client_id` and `client_secret`
are randomly generated.
```json
{
"client_id": "SOME-CLIENT-ID",
"client_secret": "SOME-SUPER-SECRET-SECRET",
"response_types": ["code"],
"grant_types": ["authorization_code"],
"scope": "openid profile email",
"redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"]
}
```
Synapse config:
```yaml
oidc_providers:
# Shibboleth IDP
#
- idp_id: shibboleth
idp_name: "Shibboleth Login"
discover: true
issuer: "https://YOUR-IDP-URL.TLD"
client_id: "YOUR_CLIENT_ID"
client_secret: "YOUR-CLIENT-SECRECT-FROM-YOUR-IDP"
scopes: ["openid", "profile", "email"]
allow_existing_users: true
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
subject_claim: "sub"
localpart_template: "{{ user.sub.split('@')[0] }}"
display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
```
### Twitch
1. Set up a developer account on [Twitch](https://dev.twitch.tv/)
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
Synapse config:
```yaml
oidc_providers:
- idp_id: twitch
idp_name: Twitch
issuer: "https://id.twitch.tv/oauth2/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
client_auth_method: "client_secret_post"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
### Twitter
*Using Twitter as an identity provider requires using Synapse 1.75.0 or later.*
1. Set up a developer account on [Twitter](https://developer.twitter.com/en/portal/dashboard)
2. Create a project & app.
3. Enable user authentication and under "Type of App" choose "Web App, Automated App or Bot".
4. Under "App info" set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
5. Obtain the OAuth 2.0 credentials under the "Keys and tokens" tab, and copy the OAuth 2.0 Client ID and Client Secret.
Synapse config:
```yaml
oidc_providers:
- idp_id: twitter
idp_name: Twitter
idp_brand: "twitter" # optional: styling hint for clients
discover: false # Twitter is not OpenID compliant.
issuer: "https://twitter.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
pkce_method: "always"
# offline.access provides refresh tokens; tweet.read and users.read are needed for the userinfo request.
scopes: ["offline.access", "tweet.read", "users.read"]
authorization_endpoint: https://twitter.com/i/oauth2/authorize
token_endpoint: https://api.twitter.com/2/oauth2/token
userinfo_endpoint: https://api.twitter.com/2/users/me?user.fields=profile_image_url
user_mapping_provider:
config:
subject_template: "{{ user.data.id }}"
localpart_template: "{{ user.data.username }}"
display_name_template: "{{ user.data.name }}"
picture_template: "{{ user.data.profile_image_url }}"
```
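For example, a `/2/users/me` response of `{"data": {"id": "2244994945", "username": "jack", "name": "Jack"}}` (illustrative values) would, assuming a homeserver named `example.com`, produce the Matrix user `@jack:example.com` with display name `Jack`; the `user.data.*` paths in the templates reflect the fact that Twitter's v2 API nests the user object under a `data` key.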
### XWiki
Install the [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
Synapse config:
```yaml
oidc_providers:
- idp_id: xwiki
idp_name: "XWiki"
issuer: "https://myxwikihost/xwiki/oidc/"
client_id: "your-client-id" # TO BE FILLED
client_auth_method: none
scopes: ["openid", "profile"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
View File
@@ -16,7 +16,7 @@ connect to a postgres database.
- For other pre-built packages, please consult the documentation from
the relevant package.
- If you installed synapse [in a
virtualenv](setup/installation.md#installing-as-a-python-module-from-pypi), you can install
virtualenv](setup/installation.md#installing-from-source), you can install
the library with:
~/synapse/env/bin/pip install "matrix-synapse[postgres]"
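Once the library is installed, Synapse reads its PostgreSQL connection details from the `database` section of `homeserver.yaml`. A minimal sketch, with placeholder values to be replaced:
```yaml
database:
  name: psycopg2  # use the PostgreSQL driver instead of sqlite3
  args:
    user: synapse_user        # placeholder
    password: secretpassword  # placeholder
    dbname: synapse           # placeholder
    host: localhost           # placeholder
    cp_min: 5                 # connection pool bounds
    cp_max: 10
```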
View File
@@ -30,6 +30,12 @@ minimal.
See [the TCP replication documentation](tcp_replication.md).
### The Slaved DataStore
There are read-only versions of the synapse storage layer in
`synapse/replication/slave/storage` that use the responses of the
replication API to invalidate their caches.
### The TCP Replication Module
Information about how the TCP replication module is structured, including how
the classes interact, can be found in
View File
@@ -46,7 +46,7 @@ when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)
Optionally, you can also set
[`request_id_header`](./usage/configuration/config_documentation.md#listeners)
[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
so that the server extracts and re-uses the same request ID format that the
reverse proxy is using.
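For example, if the proxy tags each request with an `X-Request-Id` header (an assumption; use whatever header your proxy actually sets), the listener might be configured like this sketch:
```yaml
listeners:
  - port: 8008
    type: http
    x_forwarded: true
    request_id_header: "X-Request-Id"  # header set by the reverse proxy
    resources:
      - names: [client, federation]
```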
View File
@@ -68,7 +68,9 @@ root:
# Write logs to the `buffer` handler, which will buffer them together in memory,
# then write them to a file.
#
# Replace "buffer" with "console" to log to stderr instead.
# Replace "buffer" with "console" to log to stderr instead. (Note that you'll
# also need to update the configuration for the `twisted` logger above, in
# this case.)
#
handlers: [buffer]
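# As a sketch (assuming the layout of the default sample config), logging to
# stderr would mean pointing both the `twisted` logger above and this root
# logger at the console handler:
#
#     loggers:
#         twisted:
#             handlers: [console]
#             propagate: false
#
#     root:
#         handlers: [console]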
View File
@@ -26,8 +26,8 @@ for most users.
#### Docker images and Ansible playbooks
There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
which can be used with the docker-compose file available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
Further information on this, including configuration options, is available in the README
on hub.docker.com.
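As a minimal sketch (the volume path and port mapping here are assumptions; the maintained file in `contrib/docker` is more complete), a `docker-compose.yml` using that image might look like:
```yaml
version: "3"
services:
  synapse:
    image: matrixdotorg/synapse:latest
    restart: unless-stopped
    volumes:
      - ./data:/data   # holds homeserver.yaml, signing keys and media
    ports:
      - "8008:8008"    # HTTP listener for client and federation traffic
```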
@@ -136,7 +136,7 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi
#### ArchLinux
The quickest way to get up and running with ArchLinux is probably with the community package
<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
the necessary dependencies.
pip may be outdated (6.0.7-1, which needs to be upgraded to 6.0.8-1):
@@ -278,7 +278,7 @@ Installing prerequisites on Ubuntu or Debian:
```sh
sudo apt install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev
libssl-dev virtualenv libjpeg-dev libxslt1-dev
```
##### ArchLinux
@@ -287,7 +287,7 @@ Installing prerequisites on ArchLinux:
```sh
sudo pacman -S base-devel python python-pip \
python-setuptools python-virtualenv sqlite3 icu
python-setuptools python-virtualenv sqlite3
```
##### CentOS/Fedora
@@ -297,8 +297,7 @@ Installing prerequisites on CentOS or Fedora Linux:
```sh
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
python3-virtualenv libffi-devel openssl-devel python3-devel \
libicu-devel
python3-virtualenv libffi-devel openssl-devel python3-devel
sudo dnf groupinstall "Development Tools"
```
@@ -311,12 +310,8 @@ You may need to install the latest Xcode developer tools:
xcode-select --install
```
Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) for them.
You may need to install icu, and make the icu binaries and libraries accessible.
Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.
On ARM-based Macs you may also need to install libjpeg and libpq:
On ARM-based Macs you may need to install libjpeg and libpq.
You can use Homebrew (https://brew.sh):
```sh
brew install jpeg libpq
```
@@ -337,8 +332,7 @@ Installing prerequisites on openSUSE:
```sh
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
python-devel libffi-devel libopenssl-devel libjpeg62-devel \
libicu-devel
python-devel libffi-devel libopenssl-devel libjpeg62-devel
```
##### OpenBSD
View File
@@ -120,7 +120,7 @@ specified in the config. It is located at
## SAML Mapping Providers
The SAML mapping provider can be customized by editing the
[`saml2_config.user_mapping_provider.module`](usage/configuration/config_documentation.md#saml2_config)
[`saml2_config.user_mapping_provider.module`](docs/usage/configuration/config_documentation.md#saml2_config)
config option.
`saml2_config.user_mapping_provider.config` allows you to provide custom
View File
@@ -17,7 +17,6 @@ worker_listeners:
#
#- type: http
# port: 8035
# x_forwarded: true
# resources:
# - names: [client]
View File
@@ -1,10 +1,15 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 8083
x_forwarded: true
resources:
- names: [client, federation]
Some files were not shown because too many files have changed in this diff.