
Compare commits


7 Commits

Author  SHA1  Message  Date

Andrew Morgan  3dcc1efc43  Move callback-related code from AccountData to its own class  2023-03-09 16:50:31 +00:00

Andrew Morgan  46c0ab559b  Move callback-related code from the PasswordAuthProvider to its own class  2023-03-09 16:50:31 +00:00

Andrew Morgan  e8cdfc771b  Move callback-related code from the BackgroundUpdater to its own class  2023-03-09 16:50:31 +00:00

Andrew Morgan  1b30b82ac6  Move callback-related code from the PresenceRouter to its own class  2023-03-09 16:50:31 +00:00

Andrew Morgan  266f426c50  Move callback-related code from ThirdPartyEventRules to its own class  2023-03-09 16:50:31 +00:00
  And update the many references.

Andrew Morgan  c3c3c6d200  Move callback-related code from the SpamChecker to its own class  2023-03-09 16:50:31 +00:00
  And update the many references.

Andrew Morgan  9cd8fecdc5  Move Account Validity callbacks to a dedicated file  2023-03-09 16:50:31 +00:00
  Spreading module callback registration across the codebase is both a bit
  messy and makes it unclear where a user should register callbacks if
  they want to define a new class of callbacks.

  Consolidating these under the synapse.module_api module cleans things up
  a bit, puts related code in the same place and makes it much more
  obvious how to extend it.
362 changed files with 5966 additions and 14599 deletions
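
The last commit above (9cd8fecdc5) describes consolidating module callback registration under `synapse.module_api`. The following is a minimal sketch of that pattern, not Synapse's actual code: one dedicated class owns a single category of callbacks and exposes one registration method. The class name, callback name and signature are illustrative assumptions, loosely modelled on the account validity hooks.

```python
from typing import Awaitable, Callable, List, Optional

# Assumed callback shape: given a Matrix user ID, report whether the account has
# expired. None means "no opinion", so the next registered callback is consulted.
IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]


class AccountValidityModuleApiCallbacks:
    """Keeps every registered account-validity callback in one dedicated place."""

    def __init__(self) -> None:
        self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []

    def register_callbacks(
        self,
        is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
    ) -> None:
        # Modules register here (via the module API) instead of through
        # handler-specific registration functions scattered across the codebase.
        if is_user_expired is not None:
            self.is_user_expired_callbacks.append(is_user_expired)
```

A module would then register through the module API (hypothetically, something like `callbacks.account_validity.register_callbacks(is_user_expired=my_check)`) rather than importing a handler directly, which is what makes it obvious where new classes of callbacks should live.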

View File

@@ -31,6 +31,34 @@ sed -i \
-e '/systemd/d' \
pyproject.toml
# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
pip install toml wheel
REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
    data = toml.loads(f.read())
del data['tool']['poetry']['dev-dependencies']
with open('pyproject.toml', 'w') as f:
    toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
pip install poetry==1.3.2
poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"

View File

@@ -9,6 +9,16 @@ set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest

View File

@@ -129,7 +129,7 @@ body:
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output as text (not images), ideally at INFO or DEBUG log level.
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks (`\``).
Please be careful to remove any personal or private data.

View File

@@ -10,7 +10,6 @@ on:
permissions:
contents: read
packages: write
jobs:
build:
@@ -35,20 +34,11 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
images: |
docker.io/matrixdotorg/synapse
ghcr.io/matrix-org/synapse
images: matrixdotorg/synapse
flavor: |
latest=false
tags: |

View File

@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
uses: dawidd6/action-download-artifact@5e780fc7bbd0cac69fc73271ed86edf5dcb72d67 # v2.26.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
@@ -22,7 +22,7 @@ jobs:
path: book
- name: 📤 Deploy to Netlify
uses: matrix-org/netlify-pr-preview@v2
uses: matrix-org/netlify-pr-preview@v1
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}

View File

@@ -13,10 +13,25 @@ on:
workflow_dispatch:
jobs:
pre:
name: Calculate variables for GitHub Pages deployment
pages:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
# Figure out the target directory.
#
# The target directory depends on the name of the branch
@@ -40,65 +55,11 @@ jobs:
# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
outputs:
branch-version: ${{ steps.vars.outputs.branch-version }}
################################################################################
pages-docs:
name: GitHub Pages
runs-on: ubuntu-latest
needs:
- pre
steps:
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
- name: Build the documentation
# mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
# However, we're using docs/README.md for other purposes and need to pick a new page
# as the default. Let's opt for the welcome page instead.
run: |
mdbook build
cp book/welcome_and_overview.html book/index.html
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
destination_dir: ./${{ needs.pre.outputs.branch-version }}
################################################################################
pages-devdocs:
name: GitHub Pages (developer docs)
runs-on: ubuntu-latest
needs:
- pre
steps:
- uses: actions/checkout@v3
- name: "Set up Sphinx"
uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
groups: "dev-docs"
extras: ""
- name: Build the documentation
run: |
cd dev-docs
poetry run make html
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./dev-docs/_build/html
destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
destination_dir: ./${{ steps.vars.outputs.branch-version }}

View File

@@ -27,7 +27,9 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -59,7 +61,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
@@ -130,7 +134,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
@@ -178,8 +184,6 @@ jobs:
with:
path: synapse
- uses: actions/setup-go@v4
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

View File

@@ -4,15 +4,13 @@ name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs). PRs skip
# building wheels on macOS & ARM.
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop", "release-*"]
# we do the full build on tags.
tags: ["v*"]
merge_group:
workflow_dispatch:
concurrency:

View File

@@ -4,7 +4,6 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
merge_group:
workflow_dispatch:
concurrency:
@@ -34,9 +33,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -65,60 +61,9 @@ jobs:
- run: .ci/scripts/check_lockfile.py
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
with:
install-project: "false"
- name: Import order (isort)
run: poetry run isort --check --diff .
- name: Code style (black)
run: poetry run black --check --diff .
- name: Semantic checks (ruff)
# --quiet suppresses the update check.
run: poetry run ruff --quiet .
lint-mypy:
runs-on: ubuntu-latest
name: Typechecking
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
with:
# We want to make use of type hints in optional dependencies too.
extras: all
# We have seen odd mypy failures that were resolved when we started
# installing the project again:
# https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
# To make CI green, err towards caution and install the project.
install-project: "true"
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
uses: actions/cache@v3
with:
path: |
.mypy_cache
key: mypy-cache-${{ github.context.sha }}
restore-keys: mypy-cache-
- name: Run mypy
run: poetry run mypy
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2"
with:
typechecking-extras: "all"
lint-crlf:
runs-on: ubuntu-latest
@@ -149,9 +94,6 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
@@ -167,8 +109,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -185,7 +131,10 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
components: clippy
@@ -202,7 +151,10 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
# We use nightly so that it correctly groups together imports
toolchain: nightly-2022-12-01
@@ -216,7 +168,6 @@ jobs:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-mypy
- lint-crlf
- lint-newsfile
- lint-pydantic
@@ -268,7 +219,12 @@ jobs:
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -308,7 +264,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
@@ -321,25 +282,34 @@ jobs:
with:
python-version: '3.7'
# Calculating the old-deps actually takes a bunch of time, so we cache the
# pyproject.toml / poetry.lock. We need to cache pyproject.toml as
# otherwise the `poetry install` step will error due to the poetry.lock
# file being outdated.
#
# This caches the output of `Prepare old deps`, which should generate the
# same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- uses: actions/cache@v3
id: cache-poetry-old-deps
name: Cache poetry.lock
with:
path: |
poetry.lock
pyproject.toml
key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
# Note: we install using `pip` here, not poetry. `poetry install` ignores the
# build-system section (https://github.com/python-poetry/poetry/issues/6154), but
# we explicitly want to test that you can `pip install` using the oldest version
# of poetry-core and setuptools-rust.
- run: pip install .[all,test]
# We only now install poetry so that `setup-python-poetry` caches the
# right poetry.lock's dependencies.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
poetry-version: "1.3.2"
extras: "all test"
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
# don't do this then python can't find the native lib.
- run: rm -rf synapse/
# Sanity check we can import/run Synapse
- run: python -m synapse.app.homeserver --help
- run: python -m twisted.trial -j6 tests
- run: poetry run trial -j6 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -415,7 +385,12 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -555,11 +530,14 @@ jobs:
path: synapse
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- uses: actions/setup-go@v4
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -583,7 +561,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- run: cargo test
@@ -601,7 +584,10 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2

View File

@@ -5,13 +5,6 @@ on:
- cron: 0 8 * * *
workflow_dispatch:
inputs:
twisted_ref:
description: Commit, branch or tag to checkout from upstream Twisted.
required: false
default: 'trunk'
type: string
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +18,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -34,7 +29,7 @@ jobs:
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
@@ -48,7 +43,9 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -85,7 +82,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
@@ -141,8 +140,6 @@ jobs:
with:
path: synapse
- uses: actions/setup-go@v4
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

.gitignore (vendored): 9 lines changed
View File

@@ -15,10 +15,9 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want poetry, cargo and flake lockfiles.
# We do want the poetry and cargo lockfile.
!poetry.lock
!Cargo.lock
!flake.lock
# stuff that is likely to exist when you run a server locally
/*.db
@@ -39,9 +38,6 @@ __pycache__/
/.envrc
.direnv/
# For nix/devenv users
.devenv/
# IDEs
/.idea/
/.ropeproject/
@@ -57,7 +53,6 @@ __pycache__/
/coverage.*
/dist/
/docs/build/
/dev-docs/_build/
/htmlcov
/pip-wheel-metadata/
@@ -66,7 +61,7 @@ book/
# complement
/complement-*
/main.tar.gz
/master.tar.gz
# rust
/target/

View File

@@ -1,439 +1,3 @@
Synapse 1.84.0 (2023-05-23)
===========================
The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information.
Bugfixes
--------
- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599))
Synapse 1.84.0rc1 (2023-05-16)
==============================
Features
--------
- Add an option to prevent media downloads from configured domains. ([\#15197](https://github.com/matrix-org/synapse/issues/15197))
- Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. ([\#15224](https://github.com/matrix-org/synapse/issues/15224))
- Add redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312))
- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. ([\#15516](https://github.com/matrix-org/synapse/issues/15516))
- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528))
- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536))
- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559))
- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569))
Bugfixes
--------
- Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523))
- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555))
- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564))
- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571))
Deprecations and Removals
-------------------------
- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491))
Updates to the Docker image
---------------------------
- Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567))
Improved Documentation
----------------------
- Clarify documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544))
- Fix path to the `statistics/database/rooms` admin API in documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560))
- Update and improve Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587))
Internal Changes
----------------
- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025))
- Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470))
- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509))
- Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522))
- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527))
- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529))
- Speed up deleting of old rows in `event_push_actions`. ([\#15531](https://github.com/matrix-org/synapse/issues/15531))
- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545))
- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534))
- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535))
- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539))
- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542))
- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543))
- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548))
- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549))
- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550))
- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551))
- Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552))
- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553))
- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558))
- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562))
- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563))
- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565))
- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570))
- Deal with upcoming Github Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576))
- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577))
- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588))
- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589))
- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590))
- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. ([\#15591](https://github.com/matrix-org/synapse/issues/15591))
- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592))
- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594))
Synapse 1.83.0 (2023-05-09)
===========================
No significant changes since 1.83.0rc1.
Synapse 1.83.0rc1 (2023-05-02)
==============================
Features
--------
- Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315))
- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): Scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318))
- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344))
- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387))
- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482))
Bugfixes
--------
- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361))
- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. ([\#15417](https://github.com/matrix-org/synapse/issues/15417))
- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494))
Improved Documentation
----------------------
- Add Nginx loadbalancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411))
- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498))
Internal Changes
----------------
- Speedup tests by caching HomeServerConfig instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284))
- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356))
- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418))
- Add column `full_user_id` to tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458))
- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462))
- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497))
- Support claiming more than one OTK at a time. ([\#15468](https://github.com/matrix-org/synapse/issues/15468))
- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471))
- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473))
- Bump cryptography from 40.0.1 to 40.0.2. ([\#15474](https://github.com/matrix-org/synapse/issues/15474))
- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475))
- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476))
- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479))
- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495))
- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507))
- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508))
- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510))
- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511))
- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512))
- Update the check_schema_delta script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466))
Synapse 1.82.0 (2023-04-25)
===========================
No significant changes since 1.82.0rc1.
Synapse 1.82.0rc1 (2023-04-18)
==============================
Features
--------
- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. ([\#15333](https://github.com/matrix-org/synapse/issues/15333))
- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431))
- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436))
Bugfixes
--------
- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181))
- Fix and document untold assumption that `on_logged_out` module hooks will be called before the deletion of pushers. ([\#15410](https://github.com/matrix-org/synapse/issues/15410))
- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423))
- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. ([\#15425](https://github.com/matrix-org/synapse/issues/15425))
- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428))
Improved Documentation
----------------------
- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452))
Deprecations and Removals
-------------------------
- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405))
Internal Changes
----------------
- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372))
- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373))
- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374))
- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375))
- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376))
- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404))
- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412))
- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413))
- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414))
- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415))
- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441))
- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442))
- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443))
- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444))
- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445))
- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446))
- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447))
- Bump psycopg2 from 2.9.5 to 2.9.6. ([\#15448](https://github.com/matrix-org/synapse/issues/15448))
- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429))
- Implement [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393))
- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. ([\#15394](https://github.com/matrix-org/synapse/issues/15394))
- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property. ([\#15395](https://github.com/matrix-org/synapse/issues/15395))
- Trust dtonlay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406))
- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409))
- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421))
- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427))
- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432))
- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433))
- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435))
- Disable directory listing for static resources in `/_matrix/static/`. ([\#15438](https://github.com/matrix-org/synapse/issues/15438))
- Move various module API callback registration methods to a dedicated class. ([\#15453](https://github.com/matrix-org/synapse/issues/15453))
Synapse 1.81.0 (2023-04-11)
===========================
Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.
Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.
A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.
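For illustration only, here is a minimal sketch of the two authentication styles described above, as seen from a homeserver pushing a transaction to an application service. This is not Synapse's own transport code: the base URL, token and transaction ID are placeholders, and the `requests` library is assumed.

```python
import requests

AS_BASE_URL = "https://appservice.example.com"  # placeholder appservice base URL
HS_TOKEN = "hs_token_from_the_registration_file"  # placeholder token
txn = {"events": []}  # empty transaction body, purely for demonstration

# Preferred (supported since Synapse 1.70.0): token in the Authorization header,
# sent to the versioned application service path. The legacy path would be the
# same URL without the /_matrix/app/v1 prefix.
requests.put(
    f"{AS_BASE_URL}/_matrix/app/v1/transactions/42",
    headers={"Authorization": f"Bearer {HS_TOKEN}"},
    json=txn,
)

# Deprecated: the same token passed as the access_token query parameter,
# still sent alongside the header for backwards compatibility.
requests.put(
    f"{AS_BASE_URL}/_matrix/app/v1/transactions/42",
    params={"access_token": HS_TOKEN},
    json=txn,
)
```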
No significant changes since 1.81.0rc2.
Synapse 1.81.0rc2 (2023-04-06)
==============================
Bugfixes
--------
- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391))
Internal Changes
----------------
- Update CI to run complement under the latest stable go version. ([\#15403](https://github.com/matrix-org/synapse/issues/15403))
Synapse 1.81.0rc1 (2023-04-04)
==============================
Features
--------
- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
- Allow loading `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
- Add experimental support for Unix sockets. Contributed by Jason Little. ([\#15353](https://github.com/matrix-org/synapse/issues/15353))
- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))
Bugfixes
--------
- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
- Add a check to [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
to ensure that the sqlite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
- Fix a long-standing bug that Synapse only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
- Fix bug in worker mode where on a rolling restart of workers the "typing" worker would consume 100% CPU until it got restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
- Fix a long-standing bug where some to_device messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
- Fix missing app variable in mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))
Improved Documentation
----------------------
- Fix a typo in login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
- Add some clarification to the doc/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))
Internal Changes
----------------
- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
- Bump sentry-sdk from 1.15.0 to 1.17.0. ([\#15285](https://github.com/matrix-org/synapse/issues/15285))
- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302))
- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303))
- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304))
- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311))
- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316))
- Fix `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319))
- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324))
- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325))
- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326))
- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327))
- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328))
- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329))
- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330))
- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334))
- Speed up pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339))
- Speed up sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340))
- Fix copyright year in SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358))
- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369))
- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370))
- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371))
- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385))
Synapse 1.80.0 (2023-03-28)
===========================
No significant changes since 1.80.0rc2.
Synapse 1.80.0rc2 (2023-03-22)
==============================
Bugfixes
--------
- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300))
- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301))
Synapse 1.80.0rc1 (2023-03-21)
==============================
Features
--------
- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187))
- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249))
- Allow loading `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268))
- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195))
- Add topic and name events to group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229))
Bugfixes
--------
- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756))
- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190))
- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232))
- Fix a long-standing error when sending message into deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235))
Updates to the Docker image
---------------------------
- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239))
- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282))
Improved Documentation
----------------------
- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223))
Internal Changes
----------------
- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921))
- Add `Synapse-Trace-Id` to `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974))
- Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200))
- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222))
- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238))
- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237))
- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244))
- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247))
- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262))
- Remove unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266))
- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269))
- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274))
- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275))
- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293))
- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252))
- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253))
- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254))
- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255))
- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256))
- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257))
- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286))
- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287))
- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288))
- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289))
- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290))
- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291))
Synapse 1.79.0 (2023-03-14)
===========================
No significant changes since 1.79.0rc2.
Synapse 1.79.0rc2 (2023-03-13)
==============================
Bugfixes
--------
- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))
Internal Changes
----------------
- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))
Synapse 1.79.0rc1 (2023-03-07)
==============================
@@ -483,7 +47,7 @@ Improved Documentation
Deprecations and Removals
-------------------------
- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044]
- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
@@ -724,7 +288,7 @@ Those who are `poetry install`ing from source using our lockfile should ensure t
Notes on faster joins
---------------------
The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
After a faster join, Synapse considers that room "partially joined". In this state, you should be able to

Cargo.lock (generated): 49 lines changed
View File

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.71"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
[[package]]
name = "arc-swap"
@@ -185,9 +185,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.52"
version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
@@ -250,7 +250,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 1.0.104",
"syn",
]
[[package]]
@@ -261,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.104",
"syn",
]
[[package]]
@@ -276,9 +276,9 @@ dependencies = [
[[package]]
name = "quote"
version = "1.0.26"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
@@ -294,9 +294,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.7.3"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
dependencies = [
"aho-corasick",
"memchr",
@@ -305,9 +305,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.6.29"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
@@ -323,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.163"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.163"
version = "1.0.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.10",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.96"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea"
dependencies = [
"itoa",
"ryu",
@@ -375,17 +375,6 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"


@@ -0,0 +1 @@
Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition.

changelog.d/15190.bugfix Normal file

@@ -0,0 +1 @@
Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules.

changelog.d/15195.misc Normal file

@@ -0,0 +1 @@
Improve performance of creating and authenticating events.

changelog.d/15200.misc Normal file

@@ -0,0 +1 @@
Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key.

changelog.d/15223.doc Normal file

@@ -0,0 +1 @@
Add a missing endpoint to the workers documentation.


@@ -70,10 +70,6 @@ redis:
port: 6379
# dbid: <redis_logical_db_id>
# password: <secret_password>
# use_tls: True
# certificate_file: <path_to_certificate>
# private_key_file: <path_to_private_key>
# ca_file: <path_to_ca_certificate>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
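For reference, the matching Compose service might look something like the minimal sketch below (the image tag and options are illustrative, not part of the official example):

```yaml
# docker-compose.yml (sketch): a Redis service named "redis", as assumed above.
services:
  redis:
    image: redis:latest
    restart: unless-stopped
```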

debian/changelog vendored

@@ -1,87 +1,3 @@
matrix-synapse-py3 (1.84.0) stable; urgency=medium
* New Synapse release 1.84.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 23 May 2023 10:57:22 +0100
matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium
* New Synapse release 1.84.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 May 2023 11:12:02 +0100
matrix-synapse-py3 (1.83.0) stable; urgency=medium
* New Synapse release 1.83.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 09 May 2023 18:13:37 +0200
matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium
* New Synapse release 1.83.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 May 2023 15:56:38 +0100
matrix-synapse-py3 (1.82.0) stable; urgency=medium
* New Synapse release 1.82.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Apr 2023 11:56:06 +0100
matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
* New Synapse release 1.82.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Apr 2023 09:47:30 +0100
matrix-synapse-py3 (1.81.0) stable; urgency=medium
* New Synapse release 1.81.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Apr 2023 14:18:35 +0100
matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium
* New Synapse release 1.81.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 06 Apr 2023 16:07:54 +0100
matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium
* New Synapse release 1.81.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Apr 2023 14:29:03 +0100
matrix-synapse-py3 (1.80.0) stable; urgency=medium
* New Synapse release 1.80.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 28 Mar 2023 11:10:33 +0100
matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium
* New Synapse release 1.80.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 22 Mar 2023 08:30:16 -0700
matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium
* New Synapse release 1.80.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 21 Mar 2023 10:56:08 -0700
matrix-synapse-py3 (1.79.0) stable; urgency=medium
* New Synapse release 1.79.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Mar 2023 16:14:50 +0100
matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
* New Synapse release 1.79.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 13 Mar 2023 12:54:21 +0000
matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
* New Synapse release 1.79.0rc1.


@@ -46,7 +46,7 @@ for port in 8080 8081 8082; do
echo ''
# Warning, this heredoc depends on the interaction of tabs and spaces.
# Please don't accidentally bork me with your fancy settings.
# Please don't accidentaly bork me with your fancy settings.
listeners=$(cat <<-PORTLISTENERS
# Configure server to listen on both $https_port and $port
# This overides some of the default settings above
@@ -80,8 +80,12 @@ for port in 8080 8081 8082; do
echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
# Request keys directly from servers contacted over federation
echo 'trusted_key_servers: []'
# Ignore keys from the trusted keys server
echo '# Ignore keys from the trusted keys server'
echo 'trusted_key_servers:'
echo ' - server_name: "matrix.org"'
echo ' accept_keys_insecurely: true'
echo ''
# Allow the servers to communicate over localhost.
allow_list=$(cat <<-ALLOW_LIST


@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)


@@ -1,50 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "Synapse development"
copyright = "2023, The Matrix.org Foundation C.I.C."
author = "The Synapse Maintainers and Community"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"autodoc2",
"myst_parser",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for Autodoc2 ----------------------------------------------------
autodoc2_docstring_parser_regexes = [
# this will render all docstrings as 'MyST' Markdown
(r".*", "myst"),
]
autodoc2_packages = [
{
"path": "../synapse",
# Don't render documentation for everything as a matter of course
"auto_mode": False,
},
]
# -- Options for MyST (Markdown) ---------------------------------------------
# myst_heading_anchors = 2
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "furo"
html_static_path = ["_static"]


@@ -1,22 +0,0 @@
.. Synapse Developer Documentation documentation master file, created by
sphinx-quickstart on Mon Mar 13 08:59:51 2023.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to the Synapse Developer Documentation!
===========================================================
.. toctree::
:maxdepth: 2
:caption: Contents:
modules/federation_sender
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@@ -1,5 +0,0 @@
Federation Sender
=================
```{autodoc2-docstring} synapse.federation.sender
```


@@ -37,24 +37,9 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential curl git libffi-dev libssl-dev pkg-config \
build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# Install rust and ensure its in the PATH.
# (Rust may be needed to compile `cryptography`---which is one of poetry's
# dependencies---on platforms that don't have a `cryptography` wheel.
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \


@@ -51,7 +51,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# -z True if the length of string is zero.
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
export SYNAPSE_WORKER_TYPES="\
event_persister:2, \
event_persister, \
event_persister, \
background_worker, \
frontend_proxy, \
event_creator, \
@@ -63,8 +64,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
synchrotron, \
client_reader, \
appservice, \
pusher, \
stream_writers=account_data+presence+receipts+to_device+typing"
pusher"
fi
log "Workers requested: $SYNAPSE_WORKER_TYPES"


@@ -6,6 +6,10 @@
worker_app: "{{ app }}"
worker_name: "{{ name }}"
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: {{ port }}


@@ -19,15 +19,8 @@
# The environment variables it reads are:
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
# below. Leave empty for no workers. Add a ':' and a number at the end to
# multiply that worker. Append multiple worker types with '+' to merge the
# worker types into a single worker. Add a name and a '=' to the front of a
# worker type to give this instance a name in logs and nginx.
# Examples:
# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -47,35 +40,15 @@
import os
import platform
import re
import subprocess
import sys
from collections import defaultdict
from itertools import chain
from pathlib import Path
from typing import (
Any,
Dict,
List,
Mapping,
MutableMapping,
NoReturn,
Optional,
Set,
SupportsIndex,
)
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
import yaml
from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
WORKER_PLACEHOLDER_NAME = "placeholder_name"
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
@@ -97,13 +70,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
],
"shared_extra_conf": {
"update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
},
"shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
"worker_extra_conf": "",
},
"media_repository": {
"app": "synapse.app.generic_worker",
"app": "synapse.app.media_repository",
"listener_resources": ["media"],
"endpoint_patterns": [
"^/_matrix/media/",
@@ -116,7 +87,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
# The first configured media worker will run the media background jobs
"shared_extra_conf": {
"enable_media_repo": False,
"media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
"media_instance_running_background_jobs": "media_repository1",
},
"worker_extra_conf": "enable_media_repo: true",
},
@@ -124,9 +95,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
"shared_extra_conf": {
"notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
},
"shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
"worker_extra_conf": "",
},
"federation_sender": {
@@ -166,7 +135,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/versions$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
"^/_matrix/client/(r0|v3|unstable)/register$",
"^/_matrix/client/(r0|v3|unstable)/register/available$",
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
@@ -175,9 +143,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
"^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
"^/_matrix/client/(r0|v3|unstable)/password_policy$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
"^/_matrix/client/(r0|v3|unstable)/capabilities$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -227,9 +192,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
# This worker cannot be sharded. Therefore, there should only ever be one
# background worker. This is enforced for the safety of your database.
"shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
# This worker cannot be sharded. Therefore there should only ever be one background
# worker, and it should be named background_worker1
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
"worker_extra_conf": "",
},
"event_creator": {
@@ -310,7 +275,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
"""
NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
upstream {upstream_worker_type} {{
{body}
}}
"""
@@ -361,7 +326,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
def add_worker_roles_to_shared_config(
shared_config: dict,
worker_types_set: Set[str],
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
@@ -369,36 +334,22 @@ def add_worker_roles_to_shared_config(
append appropriate worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being
converted to YAML)
worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
This list can be a single worker type or multiple.
shared_config: The config dict that all worker instances share (after being converted to YAML)
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
worker_name: The name of the worker instance.
worker_port: The HTTP replication port that the worker instance is listening on.
"""
# The instance_map config field marks the workers that write to various replication
# streams
# The instance_map config field marks the workers that write to various replication streams
instance_map = shared_config.setdefault("instance_map", {})
# This is a list of the stream_writers that there can be only one of. Events can be
# sharded, and therefore doesn't belong here.
singular_stream_writers = [
"account_data",
"presence",
"receipts",
"to_device",
"typing",
]
# Worker-type specific sharding config. Now a single worker can fulfill multiple
# roles, check each.
if "pusher" in worker_types_set:
# Worker-type specific sharding config
if worker_type == "pusher":
shared_config.setdefault("pusher_instances", []).append(worker_name)
if "federation_sender" in worker_types_set:
elif worker_type == "federation_sender":
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
if "event_persister" in worker_types_set:
elif worker_type == "event_persister":
# Event persisters write to the events stream, so we need to update
# the list of event stream writers
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
@@ -411,154 +362,19 @@ def add_worker_roles_to_shared_config(
"port": worker_port,
}
# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.
for worker in worker_types_set:
if worker in singular_stream_writers:
shared_config.setdefault("stream_writers", {}).setdefault(
worker, []
).append(worker_name)
elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
# Update the list of stream writers
# It's convenient that the name of the worker type is the same as the stream to write
shared_config.setdefault("stream_writers", {}).setdefault(
worker_type, []
).append(worker_name)
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
def merge_worker_template_configs(
existing_dict: Optional[Dict[str, Any]],
to_be_merged_dict: Dict[str, Any],
) -> Dict[str, Any]:
"""When given an existing dict of worker template configuration consisting with both
dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
return new dict.
Args:
existing_dict: Either an existing worker template or a fresh blank one.
to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
existing_dict.
Returns: The newly merged together dict values.
"""
new_dict: Dict[str, Any] = {}
if not existing_dict:
# It doesn't exist yet, just use the new dict(but take a copy not a reference)
new_dict = to_be_merged_dict.copy()
else:
for i in to_be_merged_dict.keys():
if (i == "endpoint_patterns") or (i == "listener_resources"):
# merge the two lists, remove duplicates
new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
elif i == "shared_extra_conf":
# merge dictionary's, the worker name will be replaced later
new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
elif i == "worker_extra_conf":
# There is only one worker type that has a 'worker_extra_conf' and it is
# the media_repo. Since duplicate worker types on the same worker don't
# work, this is fine.
new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
else:
# Everything else should be identical, like "app", which only works
# because all apps are now generic_workers.
new_dict[i] = to_be_merged_dict[i]
return new_dict
def insert_worker_name_for_worker_config(
existing_dict: Dict[str, Any], worker_name: str
) -> Dict[str, Any]:
"""Insert a given worker name into the worker's configuration dict.
Args:
existing_dict: The worker_config dict that is imported into shared_config.
worker_name: The name of the worker to insert.
Returns: Copy of the dict with newly inserted worker name
"""
dict_to_edit = existing_dict.copy()
for k, v in dict_to_edit["shared_extra_conf"].items():
# Only proceed if it's the placeholder name string
if v == WORKER_PLACEHOLDER_NAME:
dict_to_edit["shared_extra_conf"][k] = worker_name
return dict_to_edit
def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
"""
Apply multiplier(if found) by returning a new expanded list with some basic error
checking.
Args:
worker_types: The unprocessed List of requested workers
Returns:
A new list with all requested workers expanded.
"""
# Checking performed:
# 1. if worker:2 or more is declared, it will create additional workers up to number
# 2. if worker:1, it will create a single copy of this worker as if no number was
# given
# 3. if worker:0 is declared, this worker will be ignored. This is to allow for
# scripting and automated expansion and is intended behaviour.
# 4. if worker:NaN or is a negative number, it will error and log it.
new_worker_types = []
for worker_type in worker_types:
if ":" in worker_type:
worker_type_components = split_and_strip_string(worker_type, ":", 1)
worker_count = 0
# Should only be 2 components, a type of worker(s) and an integer as a
# string. Cast the number as an int then it can be used as a counter.
try:
worker_count = int(worker_type_components[1])
except ValueError:
error(
f"Bad number in worker count for '{worker_type}': "
f"'{worker_type_components[1]}' is not an integer"
)
# As long as there are more than 0, we add one to the list to make below.
for _ in range(worker_count):
new_worker_types.append(worker_type_components[0])
else:
# If it's not a real worker_type, it will error out later.
new_worker_types.append(worker_type)
return new_worker_types
def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
"""Helper to check to make sure worker types that cannot have multiples do not.
Args:
worker_type: The type of worker to check against.
Returns: True if allowed, False if not
"""
return worker_type not in [
"background_worker",
"account_data",
"presence",
"receipts",
"typing",
"to_device",
]
def split_and_strip_string(
given_string: str, split_char: str, max_split: SupportsIndex = -1
) -> List[str]:
"""
Helper to split a string on split_char and strip whitespace from each end of each
element.
Args:
given_string: The string to split
split_char: The character to split the string on
max_split: kwarg for split() to limit how many times the split() happens
Returns:
A List of strings
"""
# Removes whitespace from ends of result strings before adding to list. Allow for
# overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
def generate_base_homeserver_config() -> None:
@@ -573,157 +389,33 @@ def generate_base_homeserver_config() -> None:
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def parse_worker_types(
requested_worker_types: List[str],
) -> Dict[str, Set[str]]:
"""Read the desired list of requested workers and prepare the data for use in
generating worker config files while also checking for potential gotchas.
Args:
requested_worker_types: The list formed from the split environment variable
containing the unprocessed requests for workers.
Returns: A dict of worker names to set of worker types. Format:
{'worker_name':
{'worker_type', 'worker_type2'}
}
"""
# A counter of worker_base_name -> int. Used for determining the name for a given
# worker when generating its config file, as each worker's name is just
# worker_base_name followed by instance number
worker_base_name_counter: Dict[str, int] = defaultdict(int)
# Similar to above, but more finely grained. This is used to determine we don't have
# more than a single worker for cases where multiples would be bad(e.g. presence).
worker_type_shard_counter: Dict[str, int] = defaultdict(int)
# The final result of all this processing
dict_to_return: Dict[str, Set[str]] = {}
# Handle any multipliers requested for given workers.
multiple_processed_worker_types = apply_requested_multiplier_for_worker(
requested_worker_types
)
# Process each worker_type_string
# Examples of expected formats:
# - requested_name=type1+type2+type3
# - synchrotron
# - event_creator+event_persister
for worker_type_string in multiple_processed_worker_types:
# First, if a name is requested, use that — otherwise generate one.
worker_base_name: str = ""
if "=" in worker_type_string:
# Split on "=", remove extra whitespace from ends then make list
worker_type_split = split_and_strip_string(worker_type_string, "=")
if len(worker_type_split) > 2:
error(
"There should only be one '=' in the worker type string. "
f"Please fix: {worker_type_string}"
)
# Assign the name
worker_base_name = worker_type_split[0]
if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
# Apply a fairly narrow regex to the worker names. Some characters
# aren't safe for use in file paths or nginx configurations.
# Don't allow to end with a number because we'll add a number
# ourselves in a moment.
error(
"Invalid worker name; please choose a name consisting of "
"alphanumeric letters, _ + -, but not ending with a digit: "
f"{worker_base_name!r}"
)
# Continue processing the remainder of the worker_type string
# with the name override removed.
worker_type_string = worker_type_split[1]
# Split the worker_type_string on "+", remove whitespace from ends then make
# the list a set so it's deduplicated.
worker_types_set: Set[str] = set(
split_and_strip_string(worker_type_string, "+")
)
if not worker_base_name:
# No base name specified: generate one deterministically from set of
# types
worker_base_name = "+".join(sorted(worker_types_set))
# At this point, we have:
# worker_base_name which is the name for the worker, without counter.
# worker_types_set which is the set of worker types for this worker.
# Validate worker_type and make sure we don't allow sharding for a worker type
# that doesn't support it. Will error and stop if it is a problem,
# e.g. 'background_worker'.
for worker_type in worker_types_set:
# Verify this is a real defined worker type. If it's not, stop everything so
# it can be fixed.
if worker_type not in WORKERS_CONFIG:
error(
f"{worker_type} is an unknown worker type! Was found in "
f"'{worker_type_string}'. Please fix!"
)
if worker_type in worker_type_shard_counter:
if not is_sharding_allowed_for_worker_type(worker_type):
error(
f"There can be only a single worker with {worker_type} "
"type. Please recount and remove."
)
# Not in shard counter, must not have seen it yet, add it.
worker_type_shard_counter[worker_type] += 1
# Generate the number for the worker using incrementing counter
worker_base_name_counter[worker_base_name] += 1
worker_number = worker_base_name_counter[worker_base_name]
worker_name = f"{worker_base_name}{worker_number}"
if worker_number > 1:
# If this isn't the first worker, check that we don't have a confusing
# mixture of worker types with the same base name.
first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
if first_worker_with_base_name != worker_types_set:
error(
f"Can not use worker_name: '{worker_name}' for worker_type(s): "
f"{worker_types_set!r}. It is already in use by "
f"worker_type(s): {first_worker_with_base_name!r}"
)
dict_to_return[worker_name] = worker_types_set
return dict_to_return
def generate_worker_files(
environ: Mapping[str, str],
config_path: str,
data_dir: str,
requested_worker_types: Dict[str, Set[str]],
environ: Mapping[str, str], config_path: str, data_dir: str
) -> None:
"""Read the desired workers(if any) that is passed in and generate shared
homeserver, nginx and supervisord configs.
"""Read the desired list of workers from environment variables and generate
shared homeserver, nginx and supervisord configs.
Args:
environ: os.environ instance.
config_path: The location of the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
requested_worker_types: A Dict containing requested workers in the format of
{'worker_name1': {'worker_type', ...}}
"""
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
# shared_config is the contents of a Synapse config file that will be shared amongst
# the main Synapse process as well as all workers.
# It is intended mainly for disabling functionality when certain workers are spun up,
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result to the shared
# config file.
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": 9093,
"bind_address": "127.0.0.1",
"type": "http",
"resources": [{"names": ["replication"]}],
}
@@ -735,9 +427,9 @@ def generate_worker_files(
listeners += original_listeners
# The shared homeserver config. The contents of which will be inserted into the
# base shared worker jinja2 template. This config file will be passed to all
# workers, included Synapse's main process. It is intended mainly for disabling
# functionality when certain workers are spun up, and adding a replication listener.
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
# List of dicts that describe workers.
@@ -745,20 +437,31 @@ def generate_worker_files(
# program blocks.
worker_descriptors: List[Dict[str, Any]] = []
# Upstreams for load-balancing purposes. This dict takes the form of the worker
# type to the ports of each worker. For example:
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
# {
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
# will be placed after the proxy_pass directive. The main benefit to representing
# this data as a dict over a str is that we can easily deduplicate endpoints
# across multiple instances of the same worker. The final rendering will be combined
# with nginx_upstreams and placed in /etc/nginx/conf.d.
nginx_locations: Dict[str, str] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
# dict over a str is that we can easily deduplicate endpoints across multiple instances
# of the same worker.
#
# An nginx site config that will be amended to depending on the workers that are
# spun up. To be placed in /etc/nginx/conf.d.
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
if not worker_types_env:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma, ignoring whitespace.
worker_types = [x.strip() for x in worker_types_env.split(",")]
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -766,57 +469,66 @@ def generate_worker_files(
# Start worker ports from this arbitrary port
worker_port = 18009
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {}
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
# Get the set of all worker types that we have configured
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
# Map locations to upstreams (corresponding to worker types) in Nginx
# but only if we use the appropriate worker type
for worker_type in all_worker_types_in_use:
for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
nginx_locations[endpoint_pattern] = f"http://{worker_type}"
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
error(worker_type + " is an unknown worker type! Please fix!")
# For each worker type specified by the user, create config values and write it's
# yaml config file
for worker_name, worker_types_set in requested_worker_types.items():
# The collected and processed data will live here.
worker_config: Dict[str, Any] = {}
# Merge all worker config templates for this worker into a single config
for worker_type in worker_types_set:
copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
# Merge worker type template configuration data. It's a combination of lists
# and dicts, so use this helper.
worker_config = merge_worker_template_configs(
worker_config, copy_of_template_config
)
# Replace placeholder names in the config template with the actual worker name.
worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
# Name workers by their type concatenated with an incrementing number
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
# Update the shared config with any worker_type specific options. The first of a
# given worker_type needs to stay assigned and not be replaced.
worker_config["shared_extra_conf"].update(shared_config)
shared_config = worker_config["shared_extra_conf"]
# Update the shared config with any worker-type specific options
shared_config.update(worker_config["shared_extra_conf"])
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
shared_config, worker_types_set, worker_name, worker_port
shared_config, worker_type, worker_name, worker_port
)
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
# Determine whether we need to load-balance this worker
if worker_type_total_count > 1:
# Create or add to a load-balanced upstream for this worker
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
# Upstreams are named after the worker_type
upstream = "http://" + worker_type
else:
upstream = "http://localhost:%d" % (worker_port,)
# Note that this endpoint should proxy to this upstream
nginx_locations[pattern] = upstream
# Write out the worker's logging config file
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Then a worker config file
@@ -827,10 +539,6 @@ def generate_worker_files(
worker_log_config_filepath=log_config_filepath,
)
# Save this worker's port number to the correct nginx upstreams
for worker_type in worker_types_set:
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
worker_port += 1
# Build the nginx location config blocks
@@ -843,14 +551,15 @@ def generate_worker_files(
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
body += " server localhost:%d;\n" % (port,)
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
upstream_worker_base_name=upstream_worker_base_name,
upstream_worker_type=upstream_worker_type,
body=body,
)
@@ -871,15 +580,7 @@ def generate_worker_files(
if reg_path.suffix.lower() in (".yaml", ".yml")
]
workers_in_use = len(requested_worker_types) > 0
# If there are workers, add the main process to the instance_map too.
if workers_in_use:
instance_map = shared_config.setdefault("instance_map", {})
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
workers_in_use = len(worker_types) > 0
# Shared homeserver config
convert(
@@ -977,26 +678,13 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
generate_base_homeserver_config()
else:
log("Base homeserver config exists—not regenerating")
# This script may be run multiple times (mostly by Complement, see note at top of
# file). Don't re-configure workers in this instance.
# This script may be run multiple times (mostly by Complement, see note at top of file).
# Don't re-configure workers in this instance.
mark_filepath = "/conf/workers_have_been_configured"
if not os.path.exists(mark_filepath):
# Collect and validate worker_type requests
# Read the desired worker configuration from the environment
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
# Only process worker_types if they exist
if not worker_types_env:
# No workers, just the main process
worker_types = []
requested_worker_types: Dict[str, Any] = {}
else:
# Split type names by comma, ignoring whitespace.
worker_types = split_and_strip_string(worker_types_env, ",")
requested_worker_types = parse_worker_types(worker_types)
# Always regenerate all other config files
log("Generating worker config files")
generate_worker_files(environ, config_path, data_dir, requested_worker_types)
generate_worker_files(environ, config_path, data_dir)
# Mark workers as being configured
with open(mark_filepath, "w") as f:


@@ -57,7 +57,6 @@
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Event Reports](admin_api/event_reports.md)
- [Experimental Features](admin_api/experimental_features.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
- [Register Users](admin_api/register_api.md)


@@ -1,55 +0,0 @@
# Experimental Features API
This API allows a server administrator to enable or disable some experimental features on a per-user
basis. The currently supported features are:
- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
presence state enabled
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
UIA when first uploading cross-signing keys.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).
## Enabling/Disabling Features
This API allows a server administrator to enable experimental features for a given user. The request must
provide a body containing the user id and listing the features to enable/disable in the following format:
```json
{
"features": {
"msc3026":true,
"msc3881":true
}
}
```
where true is used to enable the feature, and false is used to disable the feature.
The API is:
```
PUT /_synapse/admin/v1/experimental_features/<user_id>
```
## Listing Enabled Features
To list which features are enabled/disabled for a given user send a request to the following API:
```
GET /_synapse/admin/v1/experimental_features/<user_id>
```
It will return a list of possible features and indicate whether they are enabled or disabled for the
user like so:
```json
{
"features": {
"msc3026": true,
"msc3881": false,
"msc3967": false
}
}
```


@@ -81,52 +81,3 @@ The following fields are returned in the JSON response body:
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
* `next_token` - integer - Opaque value used for pagination. See above.
* `total` - integer - Total number of users after filtering.
# Get largest rooms by size in database
Returns the 10 largest rooms and an estimate of how much space in the database
they are taking.
This does not include the size of any media associated with the room.
Returns an error on SQLite.
*Note:* This uses the planner statistics from PostgreSQL to do the estimates,
which means that the returned information can vary widely from reality. However,
it should be enough to get a rough idea of where database disk space is going.
The API is:
```
GET /_synapse/admin/v1/statistics/database/rooms
```
A response body like the following is returned:
```json
{
"rooms": [
{
"room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
"estimated_size": 47325417353
}
],
}
```
**Response**
The following fields are returned in the JSON response body:
* `rooms` - An array of objects, sorted by largest room first. Objects contain
the following fields:
- `room_id` - string - The room ID.
- `estimated_size` - integer - Estimated disk space used in bytes by the room
in the database.
*Added in Synapse 1.83.0*


@@ -62,7 +62,7 @@ URL parameters:
- `user_id`: fully-qualified user id: for example, `@user:server.com`.
## Create or modify account
## Create or modify Account
This API allows an administrator to create or modify a user account with a
specific `user_id`.
@@ -78,29 +78,28 @@ with a body of:
```json
{
"password": "user_password",
"logout_devices": false,
"displayname": "Alice Marigold",
"avatar_url": "mxc://example.com/abcde12345",
"displayname": "User",
"threepids": [
{
"medium": "email",
"address": "alice@example.com"
"address": "<user_mail_1>"
},
{
"medium": "email",
"address": "alice@domain.org"
"address": "<user_mail_2>"
}
],
"external_ids": [
{
"auth_provider": "example",
"external_id": "12345"
"auth_provider": "<provider1>",
"external_id": "<user_id_provider_1>"
},
{
"auth_provider": "example2",
"external_id": "abc54321"
"auth_provider": "<provider2>",
"external_id": "<user_id_provider_2>"
}
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false,
"user_type": null
@@ -113,51 +112,41 @@ Returns HTTP status code:
URL parameters:
- `user_id` - A fully-qualified user id. For example, `@user:server.com`.
- `user_id`: fully-qualified user id: for example, `@user:server.com`.
Body parameters:
- `password` - **string**, optional. If provided, the user's password is updated and all
- `password` - string, optional. If provided, the user's password is updated and all
devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
logged out even when `password` is provided.
- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name
will be removed.
- `avatar_url` - **string**, optional. Must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
If set to an empty string (`""`), the user's avatar is removed.
- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are
entirely replaced with the given list. Each item in the array is an object with the following
fields:
- `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number).
- `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or
`447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number
with country code "1") for `msisdn`.
Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove
that threepid from any identity servers it is aware has a binding for it.
- `external_ids` - **array**, optional. Allow setting the identifier of the external identity
provider for SSO (Single sign-on). More details are in the configuration manual under the
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
- `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
- `address` - string. Value of third-party ID.
belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
provider for SSO (Single sign-on). Details in the configuration manual under the
sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
- `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
provided value is not in the homeserver configuration.
- `external_id` - **string**, required. An identifier for the user in the external identity provider.
When the user logs in to the identity provider, this must be the unique ID that they map to.
- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
granting them access to the Admin API, among other things.
- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
in the homeserver configuration. Note that no error is raised if the provided
value is not in the homeserver configuration.
- `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
- `admin` - bool, optional, defaults to `false`.
- `deactivated` - bool, optional. If unspecified, deactivation state will be left
unchanged on existing accounts and set to `false` for new accounts.
A user cannot be erased by deactivating with this API. For details on
deactivating users see [Deactivate Account](#deactivate-account).
- `user_type` - string or null, optional. If provided, the user type will be
adjusted. If `null` given, the user type will be cleared. Other
allowed options are: `bot` and `support`.
Note: the `password` field must also be set if both of the following are true:
- `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
- Users are allowed to set their password on this homeserver (both `password_config.enabled` and
`password_config.localdb_enabled` config options are set to `true`).
Users' passwords are wiped upon account deactivation, hence the need to set a new one here.
If the user already exists then optional parameters default to the current value.
Note: a user cannot be erased with this API. For more details on
deactivating and erasing users see [Deactivate Account](#deactivate-account).
- `user_type` - **string** or null, optional. If not provided, the user type will be
not be changed. If `null` is given, the user type will be cleared.
Other allowed options are: `bot` and `support`.
In order to re-activate an account `deactivated` must be set to `false`. If
users do not login via single-sign-on, a new `password` must be provided.
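As an illustration only, a request against this endpoint could look like the sketch below; the server URL, admin access token and body values are placeholders, and the endpoint is the `PUT /_synapse/admin/v2/users/<user_id>` API described earlier on this page:

```python
# Illustrative sketch (placeholders throughout), using the `requests` library.
import requests

resp = requests.put(
    "https://synapse.example.com/_synapse/admin/v2/users/@alice:example.com",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"displayname": "Alice Marigold", "admin": False},
)
resp.raise_for_status()
# 201 indicates a new account was created, 200 that an existing one was modified.
print(resp.status_code, resp.json())
```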
## List Accounts


@@ -346,7 +346,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh


@@ -155,11 +155,43 @@ def run_upgrade(
Boolean columns require special treatment, since SQLite treats booleans the
same as integers.
Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
There are three separate aspects to this:
* Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
`synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
the integer value from SQLite to a boolean before writing the value to the
postgres database.
* Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
supported. This makes it necessary to avoid using `TRUE` and `FALSE`
constants in SQL commands.
For example, to insert a `TRUE` value into the database, write:
```python
txn.execute("INSERT INTO tbl(col) VALUES (?)", (True, ))
```
* Default values for new boolean columns present a particular
difficulty. Generally it is best to create separate schema files for
Postgres and SQLite. For example:
```sql
# in 00delta.sql.postgres:
ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT FALSE;
```
```sql
# in 00delta.sql.sqlite:
ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT 0;
```
Note that there is a particularly insidious failure mode here: the Postgres
flavour will be accepted by SQLite 3.22, but will give a column whose
default value is the **string** `"FALSE"` - which, when cast back to a boolean
in Python, evaluates to `True`.
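A one-line illustration of why that string default is dangerous once it reaches Python:

```python
# Any non-empty string is truthy, so a stored "FALSE" reads back as True.
assert bool("FALSE") is True
```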
## `event_id` global uniqueness


@@ -103,9 +103,6 @@ Called during a logout request for a user. It is passed the qualified user ID, t
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
to still be present.
If multiple modules implement this callback, Synapse runs them all in order.
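For example, a module might register this callback roughly as in the following sketch (the module scaffolding and the callback body are illustrative):

```python
from typing import Optional

from synapse.module_api import ModuleApi


class ExampleAuthModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Register the logout hook documented above.
        api.register_password_auth_provider_callbacks(
            on_logged_out=self.on_logged_out,
        )

    async def on_logged_out(
        self, user_id: str, device_id: Optional[str], access_token: str
    ) -> None:
        # Clean up any per-device state this module keeps; the user's pushers
        # have not been deleted yet at this point.
        print(f"{user_id} logged out device {device_id!r}")
```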
### `get_username_for_registration`


@@ -569,7 +569,7 @@ You should receive a response similar to the following. Make sure to save it.
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. Your Synapse configuration should include the following:
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
```yaml
oidc_providers:
@@ -585,9 +585,7 @@ oidc_providers:
scopes: ["read"]
user_mapping_provider:
config:
subject_template: "{{ user.id }}"
localpart_template: "{{ user.username }}"
display_name_template: "{{ user.display_name }}"
subject_claim: "id"
```
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.


@@ -26,8 +26,8 @@ for most users.
#### Docker images and Ansible playbooks
There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
which can be used with the docker-compose file available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
on hub.docker.com.


@@ -1,6 +1,10 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8083


@@ -25,7 +25,7 @@ position of all streams. The server then periodically sends `RDATA` commands
which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
the format of `<row>` is defined by the individual streams. The
`<instance_name>` is the name of the Synapse process that generated the data
(usually "master"). We expect an RDATA for every row in the DB.
(usually "master").
Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
@@ -107,7 +107,7 @@ reconnect, following the steps above.
If the server sends messages faster than the client can consume them the
server will first buffer a (fairly large) number of commands and then
disconnect the client. This ensures that we don't queue up an unbounded
number of commands in memory and gives us a potential opportunity to
number of commands in memory and gives us a potential oppurtunity to
squawk loudly. When/if the client recovers it can reconnect to the
server and ask for missed messages.
@@ -122,7 +122,7 @@ since these include tokens which can be used to restart the stream on
connection errors.
The client should keep track of the token in the last RDATA command
received for each stream so that on reconnection it can start streaming
received for each stream so that on reconneciton it can start streaming
from the correct place. Note: not all RDATA have valid tokens due to
batching. See `RdataCommand` for more details.
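A minimal sketch of the bookkeeping this implies for a client (not Synapse's actual implementation):

```python
from typing import Dict, Optional

# Last valid token seen per stream; used to resume after a reconnection.
last_tokens: Dict[str, int] = {}

def on_rdata(stream_name: str, token: Optional[int], row: object) -> None:
    # Batched RDATA rows may carry no token; only record real positions.
    if token is not None:
        last_tokens[stream_name] = token

def resume_from(stream_name: str) -> Optional[int]:
    return last_tokens.get(stream_name)
```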
@@ -188,8 +188,7 @@ client (C):
Two positions are included, the "new" position and the last position sent respectively.
This allows servers to tell instances that the positions have advanced but no
data has been written, without clients needlessly checking to see if they
have missed any updates. Instances will only fetch stuff if there is a gap between
their current position and the given last position.
have missed any updates.
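A sketch of the gap check this implies, with invented token values; an instance only needs to fetch missed updates when the advertised last-sent position is ahead of what it has already processed:
```python
def needs_catchup(current_token: int, last_sent_token: int) -> bool:
    # A gap between our position and the last position the server sent means
    # rows were missed and must be fetched before continuing.
    return last_sent_token > current_token

print(needs_catchup(150, 153))  # True: fetch updates for tokens 151..153
print(needs_catchup(153, 153))  # False: nothing was missed
```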
#### ERROR (S, C)


@@ -88,123 +88,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.84.0
## Deprecation of `worker_replication_*` configuration settings
When using workers,
* `worker_replication_host`
* `worker_replication_http_port`
* `worker_replication_http_tls`
should now be removed from individual worker YAML configurations and the main process should instead be added to the `instance_map`
in the shared YAML configuration, using the name `main`.
The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0.
### Example change
#### Before:
Shared YAML
```yaml
instance_map:
generic_worker1:
host: localhost
port: 5678
tls: false
```
Worker YAML
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
worker_replication_host: localhost
worker_replication_http_port: 3456
worker_replication_http_tls: false
worker_listeners:
- type: http
port: 1234
resources:
- names: [client, federation]
- type: http
port: 5678
resources:
- names: [replication]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```
#### After:
Shared YAML
```yaml
instance_map:
main:
host: localhost
port: 3456
tls: false
generic_worker1:
host: localhost
port: 5678
tls: false
```
Worker YAML
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
worker_listeners:
- type: http
port: 1234
resources:
- names: [client, federation]
- type: http
port: 5678
resources:
- names: [replication]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```
Notes:
* `tls` is optional but mirrors the functionality of `worker_replication_http_tls`
# Upgrading to v1.81.0
## Application service path & authentication deprecations
Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.
Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.
A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.
# Upgrading to v1.80.0
## Reporting events error code change
Before this update, the
[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid)
endpoint would return a `403` if a user attempted to report an event that they did not have access to.
This endpoint will now return a `404` in this case instead.
Clients that implement event reporting should check that their error handling code will handle this
change.
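A sketch of the client-side handling this implies, with the HTTP layer reduced to a bare status code; both the old and new codes are treated as "event cannot be reported":
```python
def handle_report_response(status_code: int) -> str:
    # Synapse before 1.80.0 returned 403 for events the user cannot access;
    # 1.80.0 and later return 404 instead. Treat both the same way.
    if status_code == 200:
        return "report accepted"
    if status_code in (403, 404):
        return "event not accessible, report rejected"
    return f"unexpected status {status_code}"

print(handle_report_response(404))  # event not accessible, report rejected
print(handle_report_response(403))  # event not accessible, report rejected
```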
# Upgrading to v1.79.0
## The `on_threepid_bind` module callback method has been deprecated
@@ -288,17 +171,6 @@ Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.
## User directory rebuild
Synapse 1.74 queues a background update
[to rebuild the user directory](https://github.com/matrix-org/synapse/pull/14643),
in order to fix missing or erroneous entries.
When this update begins, the user directory will be cleared out and rebuilt from
scratch. User directory lookups will be incomplete until the rebuild completes.
Admins can monitor the rebuild's progress by using the
[Background update Admin API](usage/administration/admin_api/background_updates.md#status).
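A sketch of polling that admin API from a script; the homeserver URL and admin access token are placeholders:
```python
import json
import urllib.request

HOMESERVER = "https://synapse.example.org"  # placeholder
ADMIN_TOKEN = "syt_admin_access_token"      # placeholder

req = urllib.request.Request(
    f"{HOMESERVER}/_synapse/admin/v1/background_updates/status",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
with urllib.request.urlopen(req) as resp:
    status = json.load(resp)

# "enabled" reports whether background updates are running at all;
# "current_updates" lists per-database progress for the active update.
print(json.dumps(status, indent=2))
```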
# Upgrading to v1.73.0
## Legacy Prometheus metric names have now been removed


@@ -577,10 +577,6 @@ delete any device that hasn't been accessed for more than the specified amount o
Defaults to no duration, which means devices are never pruned.
**Note:** This task will always run on the main process, regardless of the value of
`run_background_tasks_on`. This is due to workers currently not having the ability to
delete devices.
Example configuration:
```yaml
delete_stale_devices_after: 1y
@@ -1525,7 +1521,7 @@ This option specifies several limits for login:
address. Defaults to `per_second: 0.003`, `burst_count: 5`.
* `account` ratelimits login requests based on the account the
client is attempting to log into. Defaults to `per_second: 0.003`,
client is attempting to log into. Defaults to `per_second: 0.03`,
`burst_count: 5`.
* `failed_attempts` ratelimits login requests based on the account the
@@ -1768,30 +1764,6 @@ Example configuration:
max_image_pixels: 35M
```
---
### `prevent_media_downloads_from`
A list of domains to never download media from. Media from these
domains that is already downloaded will not be deleted, but will be
inaccessible to users. This option does not affect admin APIs trying
to download/operate on media.
This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
from the listed servers.
This will have no effect on media originating from the local server.
This only affects media downloaded from other Matrix servers. To
block domains from URL previews, see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
Defaults to an empty list (nothing blocked).
Example configuration:
```yaml
prevent_media_downloads_from:
- evil.example.org
- evil2.example.org
```
---
### `dynamic_thumbnails`
Whether to generate new thumbnails on the fly to precisely match
@@ -3128,11 +3100,6 @@ Options for each entry include:
match a pre-existing account instead of failing. This could be used if
switching from password logins to OIDC. Defaults to false.
* `enable_registration`: set to 'false' to disable automatic registration of new
users. This allows the OIDC SSO flow to be limited to sign in only, rather than
automatically registering users that have a valid SSO login but do not have
a pre-registered account. Defaults to true.
* `user_mapping_provider`: Configuration for how attributes returned from a OIDC
provider are mapped onto a matrix user. This setting has the following
sub-properties:
@@ -3249,7 +3216,6 @@ oidc_providers:
userinfo_endpoint: "https://accounts.example.com/userinfo"
jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
skip_verification: true
enable_registration: true
user_mapping_provider:
config:
subject_claim: "id"
@@ -3466,9 +3432,6 @@ This option has a number of sub-options. They are as follows:
user has unread messages in. Defaults to true, meaning push clients will see the number of
rooms with unread messages in them. Set to false to instead send the number
of unread messages.
* `jitter_delay`: Delays push notifications by a random amount up to the given
duration. Useful for mitigating timing attacks. Optional, defaults to no
delay. _Added in Synapse 1.84.0._
Example configuration:
```yaml
@@ -3476,7 +3439,6 @@ push:
enabled: true
include_content: false
group_unread_count_by_room: false
jitter_delay: "10s"
```
---
## Rooms
@@ -3723,16 +3685,6 @@ default_power_level_content_override:
trusted_private_chat: null
public_chat: null
```
---
### `forget_rooms_on_leave`
Set to true to automatically forget rooms for users when they leave them, either
normally or via a kick or ban. Defaults to false.
Example configuration:
```yaml
forget_rooms_on_leave: false
```
---
## Opentracing
@@ -3884,20 +3836,15 @@ federation_sender_instances:
### `instance_map`
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured, and to the main process.
HTTP replication listener of the worker, if configured.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
The main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches what is declared
inside the `listener` block for a `replication` listener.
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)
Example configuration:
```yaml
instance_map:
main:
host: localhost
port: 8030
worker1:
host: localhost
port: 8034
@@ -3981,16 +3928,9 @@ This setting has the following sub-options:
localhost and 6379
* `password`: Optional password if configured on the Redis instance.
* `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
* `use_tls`: Whether to use tls connection. Defaults to false.
* `certificate_file`: Optional path to the certificate file
* `private_key_file`: Optional path to the private key file
* `ca_file`: Optional path to the CA certificate file. Use this one or:
* `ca_path`: Optional path to the folder containing the CA certificate file
_Added in Synapse 1.78.0._
_Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_
Example configuration:
```yaml
redis:
@@ -3999,10 +3939,6 @@ redis:
port: 6379
password: <secret_password>
dbid: <dbid>
#use_tls: True
#certificate_file: <path_to_the_certificate_file>
#private_key_file: <path_to_the_private_key_file>
#ca_file: <path_to_the_ca_certificate_file>
```
---
## Individual worker configuration
@@ -4040,7 +3976,6 @@ worker_name: generic_worker1
```
---
### `worker_replication_host`
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
@@ -4052,7 +3987,6 @@ worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
@@ -4064,7 +3998,6 @@ worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
@@ -4090,9 +4023,9 @@ A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.
Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map)
will need to include a `replication` listener here, in order to accept internal HTTP
requests from other workers.
Workers declared in [`stream_writers`](#stream_writers) will need to include a
`replication` listener here, in order to accept internal HTTP requests from
other workers.
Example configuration:
```yaml


@@ -87,18 +87,12 @@ shared configuration file.
### Shared configuration
Normally, only a few changes are needed to make an existing configuration
file suitable for use with workers:
* First, you need to enable an
Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
for the main process
* Secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
with the `main` process defined, as well as the relevant connection information from
its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
is the address workers use to reach the `main` process, which is not necessarily the address it binds to.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
for the main process; and secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis).
Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:
```yaml
@@ -117,11 +111,6 @@ worker_replication_secret: ""
redis:
enabled: true
instance_map:
main:
host: 'localhost'
port: 9093
```
See the [configuration manual](usage/configuration/config_documentation.md)
@@ -141,13 +130,13 @@ In the config file for each worker, you must specify:
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
[`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
[`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`
For example:
@@ -245,8 +234,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
^/_matrix/client/(r0|v3|unstable)/capabilities$
# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -258,9 +245,7 @@ information.
# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
^/_matrix/client/(r0|v3|unstable)/register$
^/_matrix/client/(r0|v3|unstable)/register/available$
^/_matrix/client/v1/register/m.login.registration_token/validity$
^/_matrix/client/(r0|v3|unstable)/password_policy$
# Event sending requests
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
@@ -336,7 +321,8 @@ load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. This can
be done in the reverse proxy by extracting the username part from the user's access token.
be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
`hash $http_authorization consistent;` which contains the users access token.
Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
@@ -345,69 +331,6 @@ when a user logs in on a new device and can be *very* resource intensive, so
isolating these requests will stop them from interfering with other users' ongoing
syncs.
Example `nginx` configuration snippet that handles the cases above. This is just an
example and probably requires some changes according to your particular setup:
```nginx
# Choose sync worker based on the existence of "since" query parameter
map $arg_since $sync {
default synapse_sync;
'' synapse_initial_sync;
}
# Extract username from access token passed as URL parameter
map $arg_access_token $accesstoken_from_urlparam {
# Defaults to just passing back the whole accesstoken
default $arg_access_token;
# Try to extract username part from accesstoken URL parameter
"~syt_(?<username>.*?)_.*" $username;
}
# Extract username from access token passed as authorization header
map $http_authorization $mxid_localpart {
# Defaults to just passing back the whole accesstoken
default $http_authorization;
# Try to extract username part from accesstoken header
"~Bearer syt_(?<username>.*?)_.*" $username;
# if no authorization-header exist, try mapper for URL parameter "access_token"
"" $accesstoken_from_urlparam;
}
upstream synapse_initial_sync {
# Use the username mapper result for hash key
hash $mxid_localpart consistent;
server 127.0.0.1:8016;
server 127.0.0.1:8036;
}
upstream synapse_sync {
# Use the username mapper result for hash key
hash $mxid_localpart consistent;
server 127.0.0.1:8013;
server 127.0.0.1:8037;
server 127.0.0.1:8038;
server 127.0.0.1:8039;
}
# Sync initial/normal
location ~ ^/_matrix/client/(r0|v3)/sync$ {
proxy_pass http://$sync;
}
# Normal sync
location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ {
proxy_pass http://synapse_sync;
}
# Initial_sync
location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ {
proxy_pass http://synapse_initial_sync;
}
location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ {
proxy_pass http://synapse_initial_sync;
}
```
Federation and client requests can be balanced via simple round robin.
The inbound federation transaction request `^/_matrix/federation/v1/send/`
@@ -428,14 +351,11 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
To enable this, the worker must have:
* An [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
* Have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
config.
* Have the main process declared on the [`instance_map`](usage/configuration/config_documentation.md#instance_map) as well.
Note: The same worker can handle multiple streams, but unless otherwise documented,
config. The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.
For example, to move event persistence off to a dedicated worker, the shared
@@ -443,9 +363,6 @@ configuration would include:
```yaml
instance_map:
main:
host: localhost
port: 8030
event_persister1:
host: localhost
port: 8034

274
flake.lock generated

@@ -1,274 +0,0 @@
{
"nodes": {
"devenv": {
"inputs": {
"flake-compat": "flake-compat",
"nix": "nix",
"nixpkgs": "nixpkgs",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1683102061,
"narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
"owner": "cachix",
"repo": "devenv",
"rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "main",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1682490133,
"narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
"owner": "nix-community",
"repo": "fenix",
"rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1660459072,
"narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"lowdown-src": {
"flake": false,
"locked": {
"lastModified": 1633514407,
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
"owner": "kristapsdz",
"repo": "lowdown",
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
"type": "github"
},
"original": {
"owner": "kristapsdz",
"repo": "lowdown",
"type": "github"
}
},
"nix": {
"inputs": {
"lowdown-src": "lowdown-src",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1676545802,
"narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
"owner": "domenkozar",
"repo": "nix",
"rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "relaxed-flakes",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1678875422,
"narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1673800717,
"narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1682519441,
"narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1678376203,
"narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"fenix": "fenix",
"nixpkgs": "nixpkgs_2",
"systems": "systems"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1682426789,
"narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

204
flake.nix

@@ -1,204 +0,0 @@
# A nix flake that sets up a complete Synapse development environment. Dependencies
# for the SyTest (https://github.com/matrix-org/sytest) and Complement
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed nix (https://nixos.org) on your system to use this.
# nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
#
# Usage:
#
# With nix installed, navigate to the directory containing this flake and run
# `nix develop --impure`. The `--impure` is necessary in order to store state
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# available to you!
#
# You can start up pre-configured, local PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
#
# A PostgreSQL database called 'synapse' will be set up for you, along with
# a PostgreSQL user named 'synapse_user'.
# The 'host' can be found by running `echo $PGHOST` with the development
# shell activated. Use these values to configure your Synapse to connect
# to the local PostgreSQL database. You do not need to specify a password.
# https://matrix-org.github.io/synapse/latest/postgres
#
# All state (the venv, postgres and redis data and config) are stored in
# .devenv/state. Deleting a file from here and then re-entering the shell
# will recreate these files from scratch.
#
# You can exit the development shell by typing `exit`, or using Ctrl-D.
#
# If you would like this development environment to activate automatically
# upon entering this directory in your terminal, first install `direnv`
# (https://direnv.net/). Then run `echo 'use flake . --impure' >> .envrc` at
# the root of the Synapse repo. Finally, run `direnv allow .` to allow the
# contents of '.envrc' to run every time you enter this directory. Voilà!
{
inputs = {
# Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
# does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
nixpkgs.url = "github:NixOS/nixpkgs/master";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
devenv.url = "github:cachix/devenv/main";
# Rust toolchains and rust-analyzer nightly.
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
devShells = forEachSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
in {
# Everything is configured via devenv - a nix module for creating declarative
# developer environments. See https://devenv.sh/reference/options/ for a list
# of all possible options.
default = devenv.lib.mkShell {
inherit inputs pkgs;
modules = [
{
# Make use of the Starship command prompt when this development environment
# is manually activated (via `nix develop --impure`).
# See https://starship.rs/ for details on the prompt itself.
starship.enable = true;
# Configure packages to install.
# Search for package names at https://search.nixos.org/packages?channel=unstable
packages = with pkgs; [
# Native dependencies for running Synapse.
icu
libffi
libjpeg
libpqxx
libwebp
libxml2
libxslt
sqlite
# Native dependencies for unit tests (SyTest also requires OpenSSL).
openssl
xmlsec
# Native dependencies for running Complement.
olm
# For building the Synapse documentation website.
mdbook
];
# Install Python and manage a virtualenv with Poetry.
languages.python.enable = true;
languages.python.poetry.enable = true;
# Automatically activate the poetry virtualenv upon entering the shell.
languages.python.poetry.activate.enable = true;
# Install all extra Python dependencies; this is needed to run the unit
# tests and utilise all Synapse features.
languages.python.poetry.install.arguments = ["--extras all"];
# Install the 'matrix-synapse' package from the local checkout.
languages.python.poetry.install.installRootPackage = true;
# This is a work-around for NixOS systems. NixOS is special in
# that you can have multiple versions of packages installed at
# once, including your libc linker!
#
# Some binaries built for Linux expect those to be in a certain
# filepath, but that is not the case on NixOS. In that case, we
# force compiling those binaries locally instead.
env.POETRY_INSTALLER_NO_BINARY = "ruff";
# Install dependencies for the additional programming languages
# involved with Synapse development.
#
# * Rust is used for developing and running Synapse.
# * Golang is needed to run the Complement test suite.
# * Perl is needed to run the SyTest test suite.
languages.go.enable = true;
languages.rust.enable = true;
languages.rust.version = "stable";
languages.perl.enable = true;
# Postgres is needed to run Synapse with postgres support and
# to run certain unit tests that require postgres.
services.postgres.enable = true;
# On the first invocation of `devenv up`, create a database for
# Synapse to store data in.
services.postgres.initdbArgs = ["--locale=C" "--encoding=UTF8"];
services.postgres.initialDatabases = [
{ name = "synapse"; }
];
# Create a postgres user called 'synapse_user' which has ownership
# over the 'synapse' database.
services.postgres.initialScript = ''
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
'';
# Redis is needed in order to run Synapse in worker mode.
services.redis.enable = true;
# Define the perl modules we require to run SyTest.
#
# This list was compiled by cross-referencing https://metacpan.org/
# with the modules defined in './cpanfile' and then finding the
# corresponding nix packages on https://search.nixos.org/packages.
#
# This was done until `./install-deps.pl --dryrun` produced no output.
env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
DBI
ClassMethodModifiers
CryptEd25519
DataDump
DBDPg
DigestHMAC
DigestSHA1
EmailAddressXS
EmailMIME
EmailSimple # required by Email::Mime
EmailMessageID # required by Email::Mime
EmailMIMEContentType # required by Email::Mime
TextUnidecode # required by Email::Mime
ModuleRuntime # required by Email::Mime
EmailMIMEEncodings # required by Email::Mime
FilePath
FileSlurper
Future
GetoptLong
HTTPMessage
IOAsync
IOAsyncSSL
IOSocketSSL
NetSSLeay
JSON
ListUtilsBy
ScalarListUtils
ModulePluggable
NetAsyncHTTP
MetricsAny # required by Net::Async::HTTP
NetAsyncHTTPServer
StructDumb
URI
YAMLLibYAML
]}";
}
];
};
});
};
}


@@ -21,7 +21,26 @@ files =
tests/,
build_rust.py
# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
#
# For now, set the (?x) flag to enable "verbose" regexes
# https://docs.python.org/3/library/re.html#re.X
exclude = (?x)
^(
|synapse/storage/databases/__init__.py
|synapse/storage/databases/main/cache.py
|synapse/storage/schema/
)$
[mypy-synapse.federation.transport.client]
disallow_untyped_defs = False
[mypy-synapse.http.matrixfederationclient]
disallow_untyped_defs = False
[mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
warn_unused_ignores = False
@@ -29,6 +48,9 @@ warn_unused_ignores = False
[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False
[mypy-synapse.storage.database]
disallow_untyped_defs = False
[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
@@ -52,6 +74,11 @@ ignore_missing_imports = True
[mypy-msgpack]
ignore_missing_imports = True
# Note: WIP stubs available at
# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
[mypy-netaddr]
ignore_missing_imports = True
[mypy-parameterized.*]
ignore_missing_imports = True

2197
poetry.lock generated

File diff suppressed because it is too large.


@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.84.0"
version = "1.79.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -153,13 +153,15 @@ python = "^3.7.1"
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
immutabledict = ">=2.0"
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this
# because frozendict has been more trouble than it's worth; we would like to move to immutabledict.
frozendict = ">=1,!=2.1.2,<2.3.5"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
# We require 2.0.0 for immutabledict support.
canonicaljson = "^2.0.0"
# We require 1.5.0 to work around an issue when running against the C implementation of
# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36
canonicaljson = "^1.5.0"
# we use the type definitions added in signedjson 1.1.
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.
@@ -311,7 +313,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.265"
ruff = "0.0.252"
# Typechecking
mypy = "*"
@@ -319,7 +321,6 @@ mypy-zope = "*"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"
types-netaddr = ">=0.8.0.6"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
@@ -350,25 +351,13 @@ towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"
# Dependencies for building the development documentation
[tool.poetry.group.dev-docs]
optional = true
[tool.poetry.group.dev-docs.dependencies]
sphinx = {version = "^6.1", python = "^3.8"}
sphinx-autodoc2 = {version = "^0.4.2", python = "^3.8"}
myst-parser = {version = "^1.0.0", python = "^3.8"}
furo = ">=2022.12.7,<2024.0.0"
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# #13849 and #14079 where we see buildtime or runtime errors caused by build
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"


@@ -57,7 +57,7 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.master"),
priority_class: 5,
conditions: Cow::Borrowed(&[]),
actions: Cow::Borrowed(&[]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: false,
}];
@@ -88,7 +88,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
pattern: Cow::Borrowed("m.notice"),
},
))]),
actions: Cow::Borrowed(&[]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
@@ -122,7 +122,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
pattern: Cow::Borrowed("m.room.member"),
},
))]),
actions: Cow::Borrowed(&[]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},


@@ -140,7 +140,7 @@ impl PushRuleEvaluator {
/// name.
///
/// Returns the set of actions, if any, that match (filtering out any
/// `dont_notify` and `coalesce` actions).
/// `dont_notify` actions).
pub fn run(
&self,
push_rules: &FilteredPushRules,
@@ -198,9 +198,8 @@ impl PushRuleEvaluator {
let actions = push_rule
.actions
.iter()
// Filter out "dont_notify" and "coalesce" actions, as we don't store them
// (since they result in no action by the pushers).
.filter(|a| **a != Action::DontNotify && **a != Action::Coalesce)
// Filter out "dont_notify" actions, as we don't store them.
.filter(|a| **a != Action::DontNotify)
.cloned()
.collect();


@@ -164,12 +164,10 @@ impl PushRule {
/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
Notify,
SetTweak(SetTweak),
// Legacy actions that should be understood, but are equivalent to no-ops.
DontNotify,
Notify,
Coalesce,
SetTweak(SetTweak),
// An unrecognized custom action.
Unknown(Value),
@@ -570,10 +568,7 @@ impl FilteredPushRules {
.filter(|rule| {
// Ignore disabled experimental push rules
if !self.msc1767_enabled
&& (rule.rule_id.contains("org.matrix.msc1767")
|| rule.rule_id.contains("org.matrix.msc3933"))
{
if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
return false;
}


@@ -28,7 +28,6 @@ DISTS = (
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
"ubuntu:lunar", # 23.04 (EOL 2024-01)
)
DESC = """\


@@ -40,32 +40,10 @@ def main(force_colors: bool) -> None:
exec(r, locals)
current_schema_version = locals["SCHEMA_VERSION"]
diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
# Get the schema version of the local file to check against current schema on develop
with open("synapse/storage/schema/__init__.py", "r") as file:
local_schema = file.read()
new_locals: Dict[str, Any] = {}
exec(local_schema, new_locals)
local_schema_version = new_locals["SCHEMA_VERSION"]
if local_schema_version != current_schema_version:
# local schema version must be +/-1 the current schema version on develop
if abs(local_schema_version - current_schema_version) != 1:
click.secho(
"The proposed schema version has diverged more than one version from develop, please fix!",
fg="red",
bold=True,
color=force_colors,
)
click.get_current_context().exit(1)
# right, we've changed the schema version within the allowable tolerance so
# let's now use the local version as the canonical version
current_schema_version = local_schema_version
click.secho(f"Current schema version: {current_schema_version}")
diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
seen_deltas = False
bad_files = []
for diff in diffs:


@@ -11,11 +11,6 @@
# filepath of a local Complement checkout or by setting the COMPLEMENT_REF
# environment variable to pull a different branch or commit.
#
# To use the 'podman' command instead of 'docker', set the PODMAN environment
# variable. Example:
#
# PODMAN=1 ./complement.sh
#
# By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable.
#
@@ -35,6 +30,7 @@
# Exit if a line returns a non-zero exit code
set -e
# Helper to emit annotations that collapse portions of the log in GitHub Actions
echo_if_github() {
if [[ -n "$GITHUB_WORKFLOW" ]]; then
@@ -104,16 +100,6 @@ done
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1
# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
export CONTAINER_RUNTIME=podman
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
export BUILDAH_FORMAT=docker
export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
export CONTAINER_RUNTIME=docker
fi
# Change to the repository root
cd "$(dirname $0)/.."
@@ -140,16 +126,16 @@ if [ -n "$use_editable_synapse" ]; then
editable_mount="$(realpath .):/editable-src:z"
if [ -n "$rebuild_editable_synapse" ]; then
unset skip_docker_build
elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
elif docker inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
# First set up the module in the right place for an editable installation.
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
&& docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
@@ -163,25 +149,25 @@ if [ -z "$skip_docker_build" ]; then
# Build a special image designed for use in development with editable
# installs.
$CONTAINER_RUNTIME build -t synapse-editable \
docker build -t synapse-editable \
-f "docker/editable.Dockerfile" .
$CONTAINER_RUNTIME build -t synapse-workers-editable \
docker build -t synapse-workers-editable \
--build-arg FROM=synapse-editable \
-f "docker/Dockerfile-workers" .
$CONTAINER_RUNTIME build -t complement-synapse-editable \
docker build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
# Prepare the Rust module
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
else
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse \
docker build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
@@ -189,12 +175,12 @@ if [ -z "$skip_docker_build" ]; then
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
docker build -t complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"


@@ -91,7 +91,6 @@ else
"synapse" "docker" "tests"
"scripts-dev"
"contrib" "synmark" "stubs" ".ci"
"dev-docs"
)
fi
fi


@@ -280,7 +280,7 @@ def _prepare() -> None:
)
print("Opening the changelog in your browser...")
print("Please ask #synapse-dev to give it a check.")
print("Please ask others to give it a check.")
click.launch(
f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md"
)

39
stubs/frozendict.pyi Normal file

@@ -0,0 +1,39 @@
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stub for frozendict.
from __future__ import annotations
from typing import Any, Hashable, Iterable, Iterator, Mapping, Tuple, TypeVar, overload
_KT = TypeVar("_KT", bound=Hashable) # Key type.
_VT = TypeVar("_VT") # Value type.
class frozendict(Mapping[_KT, _VT]):
@overload
def __init__(self, **kwargs: _VT) -> None: ...
@overload
def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
@overload
def __init__(
self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
) -> None: ...
def __getitem__(self, key: _KT) -> _VT: ...
def __contains__(self, key: Any) -> bool: ...
def copy(self, **add_or_replace: Any) -> frozendict: ...
def __iter__(self) -> Iterator[_KT]: ...
def __len__(self) -> int: ...
def __repr__(self) -> str: ...
def __hash__(self) -> int: ...


@@ -17,9 +17,9 @@
""" This is an implementation of a Matrix homeserver.
"""
import json
import os
import sys
from typing import Any, Dict
from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool
@@ -61,20 +61,11 @@ try:
except ImportError:
pass
# Teach canonicaljson how to serialise immutabledicts.
# Use the standard library json implementation instead of simplejson.
try:
from canonicaljson import register_preserialisation_callback
from immutabledict import immutabledict
from canonicaljson import set_json_library
def _immutabledict_cb(d: immutabledict) -> Dict[str, Any]:
try:
return d._dict
except Exception:
# Paranoia: fall back to a `dict()` call, in case a future version of
# immutabledict removes `_dict` from the implementation.
return dict(d)
register_preserialisation_callback(immutabledict, _immutabledict_cb)
set_json_library(json)
except ImportError:
pass


@@ -1,302 +0,0 @@
#!/usr/bin/env python
# Copyright 2022-2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, Optional, Pattern, Set, Tuple
import yaml
from synapse.config.homeserver import HomeServerConfig
from synapse.federation.transport.server import (
TransportLayerServer,
register_servlets as register_federation_servlets,
)
from synapse.http.server import HttpServer, ServletCallback
from synapse.rest import ClientRestResource
from synapse.rest.key.v2 import RemoteKey
from synapse.server import HomeServer
from synapse.storage import DataStore
logger = logging.getLogger("generate_workers_map")
class MockHomeserver(HomeServer):
DATASTORE_CLASS = DataStore # type: ignore
def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
super().__init__(config.server.server_name, config=config)
self.config.worker.worker_app = worker_app
GROUP_PATTERN = re.compile(r"\(\?P<[^>]+?>(.+?)\)")
@dataclass
class EndpointDescription:
"""
Describes an endpoint and how it should be routed.
"""
# The servlet class that handles this endpoint
servlet_class: object
# The category of this endpoint. Is read from the `CATEGORY` constant in the servlet
# class.
category: Optional[str]
# TODO:
# - does it need to be routed based on a stream writer config?
# - does it benefit from any optimised, but optional, routing?
# - what 'opinionated synapse worker class' (event_creator, synchrotron, etc) does
# it go in?
class EnumerationResource(HttpServer):
"""
Accepts servlet registrations for the purposes of building up a description of
all endpoints.
"""
def __init__(self, is_worker: bool) -> None:
self.registrations: Dict[Tuple[str, str], EndpointDescription] = {}
self._is_worker = is_worker
def register_paths(
self,
method: str,
path_patterns: Iterable[Pattern],
callback: ServletCallback,
servlet_classname: str,
) -> None:
# federation servlet callbacks are wrapped, so unwrap them.
callback = getattr(callback, "__wrapped__", callback)
# fish out the servlet class
servlet_class = callback.__self__.__class__ # type: ignore
if self._is_worker and method in getattr(
servlet_class, "WORKERS_DENIED_METHODS", ()
):
# This endpoint would cause an error if called on a worker, so pretend it
# was never registered!
return
sd = EndpointDescription(
servlet_class=servlet_class,
category=getattr(servlet_class, "CATEGORY", None),
)
for pat in path_patterns:
self.registrations[(method, pat.pattern)] = sd
def get_registered_paths_for_hs(
hs: HomeServer,
) -> Dict[Tuple[str, str], EndpointDescription]:
"""
Given a homeserver, get all registered endpoints and their descriptions.
"""
enumerator = EnumerationResource(is_worker=hs.config.worker.worker_app is not None)
ClientRestResource.register_servlets(enumerator, hs)
federation_server = TransportLayerServer(hs)
# we can't use `federation_server.register_servlets` but this line does the
# same thing, only it uses this enumerator
register_federation_servlets(
federation_server.hs,
resource=enumerator,
ratelimiter=federation_server.ratelimiter,
authenticator=federation_server.authenticator,
servlet_groups=federation_server.servlet_groups,
)
# the key server endpoints are separate again
RemoteKey(hs).register(enumerator)
return enumerator.registrations
def get_registered_paths_for_default(
worker_app: Optional[str], base_config: HomeServerConfig
) -> Dict[Tuple[str, str], EndpointDescription]:
"""
Given the name of a worker application and a base homeserver configuration,
returns:
Dict from (method, path) to EndpointDescription
TODO Don't require passing in a config
"""
hs = MockHomeserver(base_config, worker_app)
# TODO We only do this to avoid an error, but don't need the database etc
hs.setup()
return get_registered_paths_for_hs(hs)
def elide_http_methods_if_unconflicting(
registrations: Dict[Tuple[str, str], EndpointDescription],
all_possible_registrations: Dict[Tuple[str, str], EndpointDescription],
) -> Dict[Tuple[str, str], EndpointDescription]:
"""
Elides HTTP methods (by replacing them with `*`) if all possible registered methods
can be handled by the worker whose registration map is `registrations`.
i.e. the only endpoints left with methods (other than `*`) should be the ones where
the worker can't handle all possible methods for that path.
"""
def paths_to_methods_dict(
methods_and_paths: Iterable[Tuple[str, str]]
) -> Dict[str, Set[str]]:
"""
Given (method, path) pairs, produces a dict from path to set of methods
available at that path.
"""
result: Dict[str, Set[str]] = {}
for method, path in methods_and_paths:
result.setdefault(path, set()).add(method)
return result
all_possible_reg_methods = paths_to_methods_dict(all_possible_registrations)
reg_methods = paths_to_methods_dict(registrations)
output = {}
for path, handleable_methods in reg_methods.items():
if handleable_methods == all_possible_reg_methods[path]:
any_method = next(iter(handleable_methods))
# TODO This assumes that all methods have the same servlet.
# I suppose that's possibly dubious?
output[("*", path)] = registrations[(any_method, path)]
else:
for method in handleable_methods:
output[(method, path)] = registrations[(method, path)]
return output
def simplify_path_regexes(
registrations: Dict[Tuple[str, str], EndpointDescription]
) -> Dict[Tuple[str, str], EndpointDescription]:
"""
Simplify all the path regexes for the dict of endpoint descriptions,
so that we don't use the Python-specific regex extensions
(and also to remove needlessly specific detail).
"""
def simplify_path_regex(path: str) -> str:
"""
Given a regex pattern, replaces all named capturing groups (e.g. `(?P<blah>xyz)`)
with a simpler version available in more common regex dialects (e.g. `.*`).
"""
# TODO it's hard to choose between these two;
# `.*` is a vague simplification
# return GROUP_PATTERN.sub(r"\1", path)
return GROUP_PATTERN.sub(r".*", path)
return {(m, simplify_path_regex(p)): v for (m, p), v in registrations.items()}
def main() -> None:
parser = argparse.ArgumentParser(
description=(
"Updates a synapse database to the latest schema and optionally runs background updates"
" on it."
)
)
parser.add_argument("-v", action="store_true")
parser.add_argument(
"--config-path",
type=argparse.FileType("r"),
required=True,
help="Synapse configuration file",
)
args = parser.parse_args()
# TODO
# logging.basicConfig(**logging_config)
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.config_path)
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
master_paths = get_registered_paths_for_default(None, config)
worker_paths = get_registered_paths_for_default(
"synapse.app.generic_worker", config
)
all_paths = {**master_paths, **worker_paths}
elided_worker_paths = elide_http_methods_if_unconflicting(worker_paths, all_paths)
elide_http_methods_if_unconflicting(master_paths, all_paths)
# TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT
categories_to_methods_and_paths: Dict[
Optional[str], Dict[Tuple[str, str], EndpointDescription]
] = defaultdict(dict)
for (method, path), desc in elided_worker_paths.items():
categories_to_methods_and_paths[desc.category][method, path] = desc
for category, contents in categories_to_methods_and_paths.items():
print_category(category, contents)
def print_category(
category_name: Optional[str],
elided_worker_paths: Dict[Tuple[str, str], EndpointDescription],
) -> None:
"""
Prints out a category, in documentation page style.
Example:
```
# Category name
/path/xyz
GET /path/abc
```
"""
if category_name:
print(f"# {category_name}")
else:
print("# (Uncategorised requests)")
for ln in sorted(
p for m, p in simplify_path_regexes(elided_worker_paths) if m == "*"
):
print(ln)
print()
for ln in sorted(
f"{m:6} {p}" for m, p in simplify_path_regexes(elided_worker_paths) if m != "*"
):
print(ln)
print()
if __name__ == "__main__":
main()


@@ -18,7 +18,6 @@
import argparse
import curses
import logging
import os
import sys
import time
import traceback
@@ -54,12 +53,11 @@ from synapse.logging.context import (
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore
from synapse.storage.databases.main import PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
@@ -69,11 +67,7 @@ from synapse.storage.databases.main.media_repository import (
MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.pusher import (
PusherBackgroundUpdatesStore,
PusherWorkerStore,
)
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
@@ -125,7 +119,6 @@ BOOLEAN_COLUMNS = {
"users": ["shadow_banned", "approved"],
"un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"],
"per_user_experimental_features": ["enabled"],
}
@@ -228,14 +221,10 @@ class Store(
MainStateBackgroundUpdateStore,
UserDirectoryBackgroundUpdateStore,
EndToEndKeyBackgroundStore,
EndToEndRoomKeyBackgroundStore,
StatsStore,
AccountDataWorkerStore,
FilteringWorkerStore,
ProfileWorkerStore,
PushRuleStore,
PusherWorkerStore,
PusherBackgroundUpdatesStore,
PresenceBackgroundUpdateStore,
ReceiptsBackgroundUpdateStore,
RelationsWorkerStore,
@@ -1337,17 +1326,10 @@ def main() -> None:
filename="port-synapse.log" if args.curses else None,
)
if not os.path.isfile(args.sqlite_database):
sys.stderr.write(
"The sqlite database you specified does not exist, please check that you have the"
"correct path."
)
sys.exit(1)
sqlite_config = {
"name": "sqlite3",
"args": {
"database": args.sqlite_database,
"database": "file:{}?mode=rw".format(args.sqlite_database),
"cp_min": 1,
"cp_max": 1,
"check_same_thread": False,

View File

@@ -39,7 +39,7 @@ class AuthBlocking:
self._mau_limits_reserved_threepids = (
hs.config.server.mau_limits_reserved_threepids
)
self._is_mine_server_name = hs.is_mine_server_name
self._server_name = hs.hostname
self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
async def check_auth_blocking(
@@ -77,7 +77,7 @@ class AuthBlocking:
if requester:
if requester.authenticated_entity.startswith("@"):
user_id = requester.authenticated_entity
elif self._is_mine_server_name(requester.authenticated_entity):
elif requester.authenticated_entity == self._server_name:
# We never block the server from doing actions on behalf of
# users.
return

View File

@@ -215,8 +215,6 @@ class EventContentFields:
FEDERATE: Final = "m.federate"
# The creator of the room, as used in `m.room.create` events.
#
# This is deprecated in MSC2175.
ROOM_CREATOR: Final = "creator"
# Used in m.room.guest_access events.
@@ -257,7 +255,6 @@ class AccountDataTypes:
DIRECT: Final = "m.direct"
IGNORED_USER_LIST: Final = "m.ignored_user_list"
TAG: Final = "m.tag"
PUSH_RULES: Final = "m.push_rules"
class HistoryVisibility:

View File

@@ -27,7 +27,7 @@ from synapse.util import json_decoder
if typing.TYPE_CHECKING:
from synapse.config.homeserver import HomeServerConfig
from synapse.types import JsonDict, StrCollection
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@@ -108,11 +108,6 @@ class Codes(str, Enum):
USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
AS_PING_URL_NOT_SET = "M_URL_NOT_SET"
AS_PING_BAD_STATUS = "M_BAD_STATUS"
AS_PING_CONNECTION_TIMEOUT = "M_CONNECTION_TIMEOUT"
AS_PING_CONNECTION_FAILED = "M_CONNECTION_FAILED"
# Attempt to send a second annotation with the same event type & annotation key
# MSC2677
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
@@ -682,27 +677,18 @@ class FederationPullAttemptBackoffError(RuntimeError):
Attributes:
        event_ids: The event IDs which we are refusing to pull
message: A custom error message that gives more context
retry_after_ms: The remaining backoff interval, in milliseconds
"""
def __init__(
self, event_ids: "StrCollection", message: Optional[str], retry_after_ms: int
):
event_ids = list(event_ids)
def __init__(self, event_ids: List[str], message: Optional[str]):
self.event_ids = event_ids
if message:
error_message = message
else:
error_message = (
f"Not attempting to pull event_ids={event_ids} because we already "
"tried to pull them recently (backing off)."
)
error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
super().__init__(error_message)
self.event_ids = event_ids
self.retry_after_ms = retry_after_ms
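A hedged usage sketch of the updated constructor above (the event IDs and backoff value are invented):

raise FederationPullAttemptBackoffError(
    event_ids=["$failed_event_1", "$failed_event_2"],
    message=None,  # fall back to the default error message built above
    retry_after_ms=60_000,
)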
class HttpResponseException(CodeMessageException):
"""

View File

@@ -170,9 +170,11 @@ class Filtering:
result = await self.store.get_user_filter(user_localpart, filter_id)
return FilterCollection(self._hs, result)
def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]:
def add_user_filter(
self, user_localpart: str, user_filter: JsonDict
) -> Awaitable[int]:
self.check_valid_filter(user_filter)
return self.store.add_user_filter(user_id, user_filter)
return self.store.add_user_filter(user_localpart, user_filter)
# TODO(paul): surely we should probably add a delete_user_filter or
# replace_user_filter at some point? There's no REST API specified for

View File

@@ -78,10 +78,7 @@ class RoomVersion:
# MSC2209: Check 'notifications' key while verifying
# m.room.power_levels auth rules.
limit_notifications_power_levels: bool
# MSC2175: No longer include the creator in m.room.create events.
msc2175_implicit_room_creator: bool
# MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
# content property.
# MSC2174/MSC2176: Apply updated redaction rules algorithm.
msc2176_redaction_rules: bool
# MSC3083: Support the 'restricted' join_rule.
msc3083_join_rules: bool
@@ -96,23 +93,17 @@ class RoomVersion:
msc2716_historical: bool
# MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
msc2716_redactions: bool
# MSC3389: Protect relation information from redaction.
msc3389_relation_redactions: bool
# MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
# knocks and restricted join rules into the same join condition.
msc3787_knock_restricted_join_rule: bool
# MSC3667: Enforce integer power levels
msc3667_int_only_power_levels: bool
# MSC3821: Do not redact the third_party_invite content field for membership events.
msc3821_redaction_rules: bool
# MSC3931: Adds a push rule condition for "room version feature flags", making
# some push rules room version dependent. Note that adding a flag to this list
# is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, making conditions
# fail if used.
msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
# MSC3989: Redact the origin field.
msc3989_redaction_rules: bool
class RoomVersions:
@@ -125,19 +116,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V2 = RoomVersion(
"2",
@@ -148,19 +135,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V3 = RoomVersion(
"3",
@@ -171,19 +154,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V4 = RoomVersion(
"4",
@@ -194,19 +173,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V5 = RoomVersion(
"5",
@@ -217,19 +192,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V6 = RoomVersion(
"6",
@@ -240,19 +211,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
@@ -263,19 +230,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=True,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V7 = RoomVersion(
"7",
@@ -286,19 +249,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V8 = RoomVersion(
"8",
@@ -309,19 +268,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V9 = RoomVersion(
"9",
@@ -332,19 +287,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC3787 = RoomVersion(
"org.matrix.msc3787",
@@ -355,42 +306,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC3821 = RoomVersion(
"org.matrix.msc3821.opt1",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=True,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V10 = RoomVersion(
"10",
@@ -401,19 +325,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4",
@@ -424,19 +344,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=True,
msc2716_historical=True,
msc2716_redactions=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
@@ -448,42 +364,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
msc3989_redaction_rules=False,
)
MSC3989 = RoomVersion(
"org.matrix.msc3989",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=True,
)
@@ -503,7 +392,6 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.MSC2716v4,
RoomVersions.MSC3989,
)
}

View File

@@ -21,7 +21,6 @@ import socket
import sys
import traceback
import warnings
from textwrap import indent
from typing import (
TYPE_CHECKING,
Any,
@@ -42,12 +41,7 @@ from typing_extensions import ParamSpec
import twisted
from twisted.internet import defer, error, reactor as _reactor
from twisted.internet.interfaces import (
IOpenSSLContextFactory,
IReactorSSL,
IReactorTCP,
IReactorUNIX,
)
from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorSSL, IReactorTCP
from twisted.internet.protocol import ServerFactory
from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
@@ -62,9 +56,8 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
@@ -72,7 +65,12 @@ from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
from synapse.module_api.callbacks.presence_router_callbacks import (
load_legacy_presence_router,
)
from synapse.module_api.callbacks.spam_checker_callbacks import (
load_legacy_spam_checkers,
)
from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
load_legacy_third_party_event_rules,
)
@@ -213,12 +211,8 @@ def handle_startup_exception(e: Exception) -> NoReturn:
# Exceptions that occur between setting up the logging and forking or starting
# the reactor are written to the logs, followed by a summary to stderr.
logger.exception("Exception during startup")
error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__))
indented_error_string = indent(error_string, " ")
quit_with_error(
f"Error during initialisation:\n{indented_error_string}\nThere may be more information in the logs."
f"Error during initialisation:\n {e}\nThere may be more information in the logs."
)
@@ -363,28 +357,6 @@ def listen_tcp(
return r # type: ignore[return-value]
def listen_unix(
path: str,
mode: int,
factory: ServerFactory,
reactor: IReactorUNIX = reactor,
backlog: int = 50,
) -> List[Port]:
"""
Create a UNIX socket for a given path and 'mode' permission
Returns:
        list of twisted.internet.tcp.Port instances listening on the UNIX socket
"""
wantPID = True
return [
# IReactorUNIX returns an object implementing IListeningPort from listenUNIX,
# but we know it will be a Port instance.
cast(Port, reactor.listenUNIX(path, factory, backlog, mode, wantPID))
]
def listen_http(
listener_config: ListenerConfig,
root_resource: Resource,
@@ -393,13 +365,18 @@ def listen_http(
context_factory: Optional[IOpenSSLContextFactory],
reactor: ISynapseReactor = reactor,
) -> List[Port]:
port = listener_config.port
bind_addresses = listener_config.bind_addresses
tls = listener_config.tls
assert listener_config.http_options is not None
site_tag = listener_config.get_site_tag()
site_tag = listener_config.http_options.tag
if site_tag is None:
site_tag = str(port)
site = SynapseSite(
"synapse.access.%s.%s"
% ("https" if listener_config.is_tls() else "http", site_tag),
"synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
site_tag,
listener_config,
root_resource,
@@ -407,41 +384,25 @@ def listen_http(
max_request_body_size=max_request_body_size,
reactor=reactor,
)
if isinstance(listener_config, TCPListenerConfig):
if listener_config.is_tls():
# refresh_certificate should have been called before this.
assert context_factory is not None
ports = listen_ssl(
listener_config.bind_addresses,
listener_config.port,
site,
context_factory,
reactor=reactor,
)
logger.info(
"Synapse now listening on TCP port %d (TLS)", listener_config.port
)
else:
ports = listen_tcp(
listener_config.bind_addresses,
listener_config.port,
site,
reactor=reactor,
)
logger.info("Synapse now listening on TCP port %d", listener_config.port)
if tls:
# refresh_certificate should have been called before this.
assert context_factory is not None
ports = listen_ssl(
bind_addresses,
port,
site,
context_factory,
reactor=reactor,
)
logger.info("Synapse now listening on TCP port %d (TLS)", port)
else:
ports = listen_unix(
listener_config.path, listener_config.mode, site, reactor=reactor
ports = listen_tcp(
bind_addresses,
port,
site,
reactor=reactor,
)
        # getHost() returns a UNIXAddress whose 'name' attribute is a byte string;
        # decode it as utf-8 so the log line reads nicely.
logger.info(
"Synapse now listening on Unix Socket at: "
f"{ports[0].getHost().name.decode('utf-8')}"
)
logger.info("Synapse now listening on TCP port %d", port)
return ports

View File

@@ -38,7 +38,7 @@ from synapse.app._base import (
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig, TCPListenerConfig
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext
@@ -236,18 +236,12 @@ class GenericWorkerServer(HomeServer):
if listener.type == "http":
self._listen_http(listener)
elif listener.type == "manhole":
if isinstance(listener, TCPListenerConfig):
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
else:
raise ConfigError(
"Can not using a unix socket for manhole at this time."
)
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
elif listener.type == "metrics":
if not self.config.metrics.enable_metrics:
logger.warning(
@@ -255,16 +249,10 @@ class GenericWorkerServer(HomeServer):
"enable_metrics is not True!"
)
else:
if isinstance(listener, TCPListenerConfig):
_base.listen_metrics(
listener.bind_addresses,
listener.port,
)
else:
raise ConfigError(
"Can not use a unix socket for metrics at this time."
)
_base.listen_metrics(
listener.bind_addresses,
listener.port,
)
else:
logger.warning("Unsupported listener type: %s", listener.type)

View File

@@ -44,7 +44,7 @@ from synapse.app._base import (
)
from synapse.config._base import ConfigError, format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig, TCPListenerConfig
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import (
@@ -78,13 +78,14 @@ class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore # type: ignore
def _listener_http(
self,
config: HomeServerConfig,
listener_config: ListenerConfig,
self, config: HomeServerConfig, listener_config: ListenerConfig
) -> Iterable[Port]:
port = listener_config.port
# Must exist since this is an HTTP listener.
assert listener_config.http_options is not None
site_tag = listener_config.get_site_tag()
site_tag = listener_config.http_options.tag
if site_tag is None:
site_tag = str(port)
# We always include a health resource.
resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -251,17 +252,12 @@ class SynapseHomeServer(HomeServer):
self._listener_http(self.config, listener)
)
elif listener.type == "manhole":
if isinstance(listener, TCPListenerConfig):
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
else:
raise ConfigError(
"Can not use a unix socket for manhole at this time."
)
_base.listen_manhole(
listener.bind_addresses,
listener.port,
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
elif listener.type == "metrics":
if not self.config.metrics.enable_metrics:
logger.warning(
@@ -269,16 +265,10 @@ class SynapseHomeServer(HomeServer):
"enable_metrics is not True!"
)
else:
if isinstance(listener, TCPListenerConfig):
_base.listen_metrics(
listener.bind_addresses,
listener.port,
)
else:
raise ConfigError(
"Can not use a unix socket for metrics at this time."
)
_base.listen_metrics(
listener.bind_addresses,
listener.port,
)
else:
# this shouldn't happen, as the listener type should have been checked
# during parsing

View File

@@ -17,8 +17,6 @@ import urllib.parse
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
@@ -26,14 +24,13 @@ from typing import (
Optional,
Sequence,
Tuple,
TypeVar,
)
from prometheus_client import Counter
from typing_extensions import Concatenate, ParamSpec, TypeGuard
from typing_extensions import TypeGuard
from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException, HttpResponseException
from synapse.api.errors import CodeMessageException
from synapse.appservice import (
ApplicationService,
TransactionOneTimeKeysCount,
@@ -41,7 +38,7 @@ from synapse.appservice import (
)
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig, serialize_event
from synapse.http.client import SimpleHttpClient, is_unknown_endpoint
from synapse.http.client import SimpleHttpClient
from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
from synapse.util.caches.response_cache import ResponseCache
@@ -81,11 +78,7 @@ sent_todevice_counter = Counter(
HOUR_IN_MS = 60 * 60 * 1000
APP_SERVICE_PREFIX = "/_matrix/app/v1"
APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"
P = ParamSpec("P")
R = TypeVar("R")
APP_SERVICE_PREFIX = "/_matrix/app/unstable"
def _is_valid_3pe_metadata(info: JsonDict) -> bool:
@@ -128,47 +121,6 @@ class ApplicationServiceApi(SimpleHttpClient):
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
)
async def _send_with_fallbacks(
self,
service: "ApplicationService",
prefixes: List[str],
path: str,
func: Callable[Concatenate[str, P], Awaitable[R]],
*args: P.args,
**kwargs: P.kwargs,
) -> R:
"""
Attempt to call an application service with multiple paths, falling back
until one succeeds.
Args:
            service: The application service; this provides the base URL.
            prefixes: A list of path prefixes to try in order for the request.
path: A suffix to append to each prefix.
func: The function to call, the first argument will be the full
endpoint to fetch. Other arguments are provided by args/kwargs.
Returns:
The return value of func.
"""
for i, prefix in enumerate(prefixes, start=1):
uri = f"{service.url}{prefix}{path}"
try:
return await func(uri, *args, **kwargs)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised path,
# fallback to next path (if one exists). Otherwise, consider it
# a legitimate error and raise.
if i < len(prefixes) and is_unknown_endpoint(e):
continue
raise
except Exception:
# Unexpected exceptions get sent to the caller.
raise
# The function should always exit via the return or raise above this.
raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")
async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
if service.url is None:
return False
@@ -176,12 +128,10 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
try:
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/users/{urllib.parse.quote(user_id)}",
self.get_json,
response = await self.get_json(
uri,
{"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
@@ -190,9 +140,9 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
if e.code == 404:
return False
logger.warning("query_user to %s received %s", service.url, e.code)
logger.warning("query_user to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("query_user to %s threw exception %s", service.url, ex)
logger.warning("query_user to %s threw exception %s", uri, ex)
return False
async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
@@ -202,23 +152,21 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
try:
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/rooms/{urllib.parse.quote(alias)}",
self.get_json,
response = await self.get_json(
uri,
{"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
logger.warning("query_alias to %s received %s", service.url, e.code)
logger.warning("query_alias to %s received %s", uri, e.code)
if e.code == 404:
return False
except Exception as ex:
logger.warning("query_alias to %s threw exception %s", service.url, ex)
logger.warning("query_alias to %s threw exception %s", uri, ex)
return False
async def query_3pe(
@@ -240,24 +188,25 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
uri = "%s%s/thirdparty/%s/%s" % (
service.url,
APP_SERVICE_PREFIX,
kind,
urllib.parse.quote(protocol),
)
try:
args: Mapping[Any, Any] = {
**fields,
b"access_token": service.hs_token,
}
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
self.get_json,
response = await self.get_json(
uri,
args=args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r",
service.url,
response,
"query_3pe to %s returned an invalid response %r", uri, response
)
return []
@@ -267,12 +216,12 @@ class ApplicationServiceApi(SimpleHttpClient):
ret.append(r)
else:
logger.warning(
"query_3pe to %s returned an invalid result %r", service.url, r
"query_3pe to %s returned an invalid result %r", uri, r
)
return ret
except Exception as ex:
logger.warning("query_3pe to %s threw exception %s", service.url, ex)
logger.warning("query_3pe to %s threw exception %s", uri, ex)
return []
async def get_3pe_protocol(
@@ -284,20 +233,21 @@ class ApplicationServiceApi(SimpleHttpClient):
async def _get() -> Optional[JsonDict]:
# This is required by the configuration.
assert service.hs_token is not None
uri = "%s%s/thirdparty/protocol/%s" % (
service.url,
APP_SERVICE_PREFIX,
urllib.parse.quote(protocol),
)
try:
info = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
self.get_json,
info = await self.get_json(
uri,
{"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not _is_valid_3pe_metadata(info):
logger.warning(
"query_3pe_protocol to %s did not return a valid result",
service.url,
"query_3pe_protocol to %s did not return a valid result", uri
)
return None
@@ -310,27 +260,12 @@ class ApplicationServiceApi(SimpleHttpClient):
return info
except Exception as ex:
logger.warning(
"query_3pe_protocol to %s threw exception %s", service.url, ex
)
logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
return None
key = (service.id, protocol)
return await self.protocol_meta_cache.wrap(key, _get)
async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None:
# The caller should check that url is set
assert service.url is not None, "ping called without URL being set"
# This is required by the configuration.
assert service.hs_token is not None
await self.post_json_get_json(
uri=f"{service.url}{APP_SERVICE_PREFIX}/ping",
post_json={"transaction_id": txn_id},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
async def push_bulk(
self,
service: "ApplicationService",
@@ -370,6 +305,8 @@ class ApplicationServiceApi(SimpleHttpClient):
)
txn_id = 0
uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))
# Never send ephemeral events to appservices that do not support it
body: JsonDict = {"events": serialized_events}
if service.supports_ephemeral:
@@ -401,11 +338,8 @@ class ApplicationServiceApi(SimpleHttpClient):
}
try:
await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/transactions/{urllib.parse.quote(str(txn_id))}",
self.put_json,
await self.put_json(
uri=uri,
json_body=body,
args={"access_token": service.hs_token},
headers={"Authorization": [f"Bearer {service.hs_token}"]},
@@ -413,7 +347,7 @@ class ApplicationServiceApi(SimpleHttpClient):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"push_bulk to %s succeeded! events=%s",
service.url,
uri,
[event.get("event_id") for event in events],
)
sent_transactions_counter.labels(service.id).inc()
@@ -424,7 +358,7 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
logger.warning(
"push_bulk to %s received code=%s msg=%s",
service.url,
uri,
e.code,
e.msg,
exc_info=logger.isEnabledFor(logging.DEBUG),
@@ -432,7 +366,7 @@ class ApplicationServiceApi(SimpleHttpClient):
except Exception as ex:
logger.warning(
"push_bulk to %s threw exception(%s) %s args=%s",
service.url,
uri,
type(ex).__name__,
ex,
ex.args,
@@ -441,121 +375,6 @@ class ApplicationServiceApi(SimpleHttpClient):
failed_transactions_counter.labels(service.id).inc()
return False
async def claim_client_keys(
self, service: "ApplicationService", query: List[Tuple[str, str, str, int]]
) -> Tuple[
Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
]:
"""Claim one time keys from an application service.
Note that any error (including a timeout) is treated as the application
service having no information.
Args:
service: The application service to query.
            query: An iterable of tuples of (user ID, device ID, algorithm, count).
Returns:
A tuple of:
                A map of user ID -> a map of device ID -> a map of key ID -> JSON dict.
A copy of the input which has not been fulfilled because the
appservice doesn't support this endpoint or has not returned
data for that tuple.
"""
if service.url is None:
return {}, query
# This is required by the configuration.
assert service.hs_token is not None
# Create the expected payload shape.
body: Dict[str, Dict[str, List[str]]] = {}
for user_id, device, algorithm, count in query:
body.setdefault(user_id, {}).setdefault(device, []).extend(
[algorithm] * count
)
uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3983/keys/claim"
try:
response = await self.post_json_get_json(
uri,
body,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
except HttpResponseException as e:
# The appservice doesn't support this endpoint.
if is_unknown_endpoint(e):
return {}, query
logger.warning("claim_keys to %s received %s", uri, e.code)
return {}, query
except Exception as ex:
logger.warning("claim_keys to %s threw exception %s", uri, ex)
return {}, query
# Check if the appservice fulfilled all of the queried user/device/algorithms
# or if some are still missing.
#
# TODO This places a lot of faith in the response shape being correct.
missing = []
for user_id, device, algorithm, count in query:
# Count the number of keys in the response for this algorithm by
# checking which key IDs start with the algorithm. This uses that
# True == 1 in Python to generate a count.
response_count = sum(
key_id.startswith(f"{algorithm}:")
for key_id in response.get(user_id, {}).get(device, {})
)
count -= response_count
# If the appservice responds with fewer keys than requested, then
# consider the request unfulfilled.
if count > 0:
missing.append((user_id, device, algorithm, count))
return response, missing
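A tiny worked example of the key-counting trick used above (all values invented):

response = {
    "@alice:example.org": {
        "DEVICE1": {
            "signed_curve25519:AAAAAA": {"key": "..."},
            "signed_curve25519:BBBBBB": {"key": "..."},
        }
    }
}
user_id, device, algorithm, count = "@alice:example.org", "DEVICE1", "signed_curve25519", 3
response_count = sum(
    key_id.startswith(f"{algorithm}:")
    for key_id in response.get(user_id, {}).get(device, {})
)
# True == 1 in Python, so response_count == 2; 3 - 2 == 1 key is still missing,
# and the tuple would be added to the `missing` list.
assert count - response_count == 1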
async def query_keys(
self, service: "ApplicationService", query: Dict[str, List[str]]
) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
"""Query the application service for keys.
Note that any error (including a timeout) is treated as the application
service having no information.
Args:
service: The application service to query.
            query: A map from user ID to a list of that user's device IDs to query.
Returns:
A map of device_keys/master_keys/self_signing_keys/user_signing_keys:
                device_keys is a map of user ID -> a map of device ID -> device info.
"""
if service.url is None:
return {}
# This is required by the configuration.
assert service.hs_token is not None
uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3984/keys/query"
try:
response = await self.post_json_get_json(
uri,
query,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
except HttpResponseException as e:
# The appservice doesn't support this endpoint.
if is_unknown_endpoint(e):
return {}
logger.warning("query_keys to %s received %s", uri, e.code)
return {}
except Exception as ex:
logger.warning("query_keys to %s threw exception %s", uri, ex)
return {}
return response
def _serialize(
self, service: "ApplicationService", events: Iterable[EventBase]
) -> List[JsonDict]:

View File

@@ -11,10 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Iterable, Type, TypeVar
from typing import Any, Iterable
import jsonschema
from pydantic import BaseModel, ValidationError, parse_obj_as
from synapse.config._base import ConfigError
from synapse.types import JsonDict
@@ -65,28 +64,3 @@ def json_error_to_config_error(
else:
path.append(str(p))
return ConfigError(e.message, path)
Model = TypeVar("Model", bound=BaseModel)
def parse_and_validate_mapping(
config: Any,
model_type: Type[Model],
) -> Dict[str, Model]:
"""Parse `config` as a mapping from strings to a given `Model` type.
Args:
config: The configuration data to check
model_type: The BaseModel to validate and parse against.
Returns:
Fully validated and parsed Dict[str, Model].
Raises:
ConfigError, if given improper input.
"""
try:
# type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because
# `model_type` is a runtime variable. Pydantic is fine with this.
instances = parse_obj_as(Dict[str, model_type], config) # type: ignore[valid-type]
except ValidationError as e:
raise ConfigError(str(e)) from e
return instances
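A minimal usage sketch for the helper above; the model here is invented purely for illustration (later in this change the helper is used with InstanceLocationConfig to validate the instance_map):

from pydantic import BaseModel, StrictInt, StrictStr

class Location(BaseModel):  # hypothetical model, for illustration only
    host: StrictStr
    port: StrictInt

parsed = parse_and_validate_mapping(
    {"worker1": {"host": "10.0.0.5", "port": 8034}}, Location
)
assert parsed["worker1"].port == 8034
# Malformed input (e.g. port="eight") raises ConfigError instead of leaking a
# raw pydantic ValidationError.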

View File

@@ -33,16 +33,6 @@ class AppServiceConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.app_service_config_files = config.get("app_service_config_files", [])
if not isinstance(self.app_service_config_files, list) or not all(
type(x) is str for x in self.app_service_config_files
):
# type-ignore: this function gets arbitrary json value; we do use this path.
raise ConfigError(
"Expected '%s' to be a list of AS config files:"
% (self.app_service_config_files),
"app_service_config_files",
)
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
@@ -50,6 +40,10 @@ def load_appservices(
hostname: str, config_files: List[str]
) -> List[ApplicationService]:
"""Returns a list of Application Services from the config files."""
if not isinstance(config_files, list):
# type-ignore: this function gets arbitrary json value; we do use this path.
logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable]
return []
# Dicts of value -> filename
seen_as_tokens: Dict[str, str] = {}

View File

@@ -74,16 +74,6 @@ class ExperimentalConfig(Config):
"msc3202_transaction_extensions", False
)
# MSC3983: Proxying OTK claim requests to exclusive ASes.
self.msc3983_appservice_otk_claims: bool = experimental.get(
"msc3983_appservice_otk_claims", False
)
# MSC3984: Proxying key queries to exclusive ASes.
self.msc3984_appservice_key_query: bool = experimental.get(
"msc3984_appservice_key_query", False
)
# MSC3706 (server-side support for partial state in /send_join responses)
# Synapse will always serve partial state responses to requests using the stable
# query parameter `omit_members`. If this flag is set, Synapse will also serve
@@ -188,19 +178,3 @@ class ExperimentalConfig(Config):
# MSC3967: Do not require UIA when first uploading cross signing keys
self.msc3967_enabled = experimental.get("msc3967_enabled", False)
# MSC3981: Recurse relations
self.msc3981_recurse_relations = experimental.get(
"msc3981_recurse_relations", False
)
# MSC3970: Scope transaction IDs to devices
self.msc3970_enabled = experimental.get("msc3970_enabled", False)
# MSC4009: E.164 Matrix IDs
self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)
# MSC4010: Do not allow setting m.push_rules account data.
self.msc4010_push_rules_account_data = experimental.get(
"msc4010_push_rules_account_data", False
)

View File

@@ -136,7 +136,6 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"type": "array",
"items": SsoAttributeRequirement.JSON_SCHEMA,
},
"enable_registration": {"type": "boolean"},
},
}
@@ -307,7 +306,6 @@ def _parse_oidc_config_dict(
user_mapping_provider_class=user_mapping_provider_class,
user_mapping_provider_config=user_mapping_provider_config,
attribute_requirements=attribute_requirements,
enable_registration=oidc_config.get("enable_registration", True),
)
@@ -407,6 +405,3 @@ class OidcProviderConfig:
# required attributes to require in userinfo to allow login/registration
attribute_requirements: List[SsoAttributeRequirement]
# Whether automatic registrations are enabled in the ODIC flow. Defaults to True
enable_registration: bool

View File

@@ -42,17 +42,11 @@ class PushConfig(Config):
# Now check for the one in the 'email' section and honour it,
# with a warning.
email_push_config = config.get("email") or {}
redact_content = email_push_config.get("redact_content")
push_config = config.get("email") or {}
redact_content = push_config.get("redact_content")
if redact_content is not None:
print(
"The 'email.redact_content' option is deprecated: "
"please set push.include_content instead"
)
self.push_include_content = not redact_content
# Whether to apply a random delay to outbound push.
self.push_jitter_delay_ms = None
push_jitter_delay = push_config.get("jitter_delay", None)
if push_jitter_delay:
self.push_jitter_delay_ms = self.parse_duration(push_jitter_delay)

View File

@@ -35,9 +35,3 @@ class RedisConfig(Config):
self.redis_port = redis_config.get("port", 6379)
self.redis_dbid = redis_config.get("dbid", None)
self.redis_password = redis_config.get("password")
self.redis_use_tls = redis_config.get("use_tls", False)
self.redis_certificate = redis_config.get("certificate_file", None)
self.redis_private_key = redis_config.get("private_key_file", None)
self.redis_ca_file = redis_config.get("ca_file", None)
self.redis_ca_path = redis_config.get("ca_path", None)

View File

@@ -137,10 +137,6 @@ class ContentRepositoryConfig(Config):
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
self.prevent_media_downloads_from = config.get(
"prevent_media_downloads_from", []
)
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)

View File

@@ -75,7 +75,3 @@ class RoomConfig(Config):
% preset
)
# We validate the actual overrides when we try to apply them.
# When enabled, users will forget rooms when they leave them, either via a
# leave, kick or ban.
self.forget_on_leave = config.get("forget_rooms_on_leave", False)

View File

@@ -214,52 +214,17 @@ class HttpListenerConfig:
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TCPListenerConfig:
"""Object describing the configuration of a single TCP listener."""
class ListenerConfig:
"""Object describing the configuration of a single listener."""
port: int = attr.ib(validator=attr.validators.instance_of(int))
bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List))
bind_addresses: List[str]
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
tls: bool = False
# http_options is only populated if type=http
http_options: Optional[HttpListenerConfig] = None
def get_site_tag(self) -> str:
"""Retrieves http_options.tag if it exists, otherwise the port number."""
if self.http_options and self.http_options.tag is not None:
return self.http_options.tag
else:
return str(self.port)
def is_tls(self) -> bool:
return self.tls
@attr.s(slots=True, frozen=True, auto_attribs=True)
class UnixListenerConfig:
"""Object describing the configuration of a single Unix socket listener."""
    # Note: unix sockets can not be TLS encrypted, so they HAVE to be behind a
    # TLS-handling reverse proxy
path: str = attr.ib()
# A default(0o666) for this is set in parse_listener_def() below
mode: int
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
# http_options is only populated if type=http
http_options: Optional[HttpListenerConfig] = None
def get_site_tag(self) -> str:
return "unix"
def is_tls(self) -> bool:
"""Unix sockets can't have TLS"""
return False
ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:
@@ -566,12 +531,12 @@ class ServerConfig(Config):
self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
# no_tls is not really supported anymore, but let's grandfather it in here.
# no_tls is not really supported any more, but let's grandfather it in
# here.
if config.get("no_tls", False):
l2 = []
for listener in self.listeners:
if isinstance(listener, TCPListenerConfig) and listener.tls:
                # Use isinstance() to assert that this listener actually has a port.
if listener.tls:
logger.info(
"Ignoring TLS-enabled listener on port %i due to no_tls",
listener.port,
@@ -612,7 +577,7 @@ class ServerConfig(Config):
)
self.listeners.append(
TCPListenerConfig(
ListenerConfig(
port=bind_port,
bind_addresses=[bind_host],
tls=True,
@@ -624,7 +589,7 @@ class ServerConfig(Config):
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append(
TCPListenerConfig(
ListenerConfig(
port=unsecure_port,
bind_addresses=[bind_host],
tls=False,
@@ -636,7 +601,7 @@ class ServerConfig(Config):
manhole = config.get("manhole")
if manhole:
self.listeners.append(
TCPListenerConfig(
ListenerConfig(
port=manhole,
bind_addresses=["127.0.0.1"],
type="manhole",
@@ -683,7 +648,7 @@ class ServerConfig(Config):
logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
TCPListenerConfig(
ListenerConfig(
port=metrics_port,
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
type="http",
@@ -759,7 +724,7 @@ class ServerConfig(Config):
self.delete_stale_devices_after = None
def has_tls_listener(self) -> bool:
return any(listener.is_tls() for listener in self.listeners)
return any(listener.tls for listener in self.listeners)
def generate_config_section(
self,
@@ -939,25 +904,25 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))
port = listener.get("port")
socket_path = listener.get("path")
    # Exactly one of a port or a path must be declared: specifying both (or neither) is an error.
if port is not None and not isinstance(port, int):
if type(port) is not int:
raise ConfigError("Listener configuration is lacking a valid 'port' option")
if socket_path is not None and not isinstance(socket_path, str):
raise ConfigError("Listener configuration is lacking a valid 'path' option")
if port and socket_path:
raise ConfigError(
"Can not have both a UNIX socket and an IP/port declared for the same "
"resource!"
)
if port is None and socket_path is None:
raise ConfigError(
"Must have either a UNIX socket or an IP/port declared for a given "
"resource!"
)
tls = listener.get("tls", False)
bind_addresses = listener.get("bind_addresses", [])
bind_address = listener.get("bind_address")
# if bind_address was specified, add it to the list of addresses
if bind_address:
bind_addresses.append(bind_address)
# if we still have an empty list of addresses, use the default list
if not bind_addresses:
if listener_type == "metrics":
# the metrics listener doesn't support IPv6
bind_addresses.append("0.0.0.0")
else:
bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
http_config = None
if listener_type == "http":
try:
@@ -967,12 +932,8 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
except ValueError as e:
raise ConfigError("Unknown listener resource") from e
# For a unix socket, default x_forwarded to True, as this is the only way of
# getting a client IP.
# Note: a reverse proxy is required anyway, as there is no way of exposing a
# unix socket to the internet.
http_config = HttpListenerConfig(
x_forwarded=listener.get("x_forwarded", (True if socket_path else False)),
x_forwarded=listener.get("x_forwarded", False),
resources=resources,
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
@@ -980,30 +941,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
if socket_path:
        # TODO: Add path validation, e.g. check that the directory exists and is writable?
# Set a default for the permission, in case it's left out
socket_mode = listener.get("mode", 0o666)
return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config)
else:
assert port is not None
bind_addresses = listener.get("bind_addresses", [])
bind_address = listener.get("bind_address")
# if bind_address was specified, add it to the list of addresses
if bind_address:
bind_addresses.append(bind_address)
# if we still have an empty list of addresses, use the default list
if not bind_addresses:
if listener_type == "metrics":
# the metrics listener doesn't support IPv6
bind_addresses.append("0.0.0.0")
else:
bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
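A sketch of a unix-socket listener being parsed by the function above; the dict shape is inferred from the parsing code and the values are illustrative:

listener = {
    "path": "/run/synapse/public.sock",
    "mode": 0o660,  # optional; the code above defaults it to 0o666
    "type": "http",
    "resources": [{"names": ["client", "federation"]}],
}
conf = parse_listener_def(0, listener)
# conf is a UnixListenerConfig; note that x_forwarded defaults to True for unix
# sockets, since a reverse proxy is the only way to reach them.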
_MANHOLE_SETTINGS_SCHEMA = {

View File

@@ -18,40 +18,22 @@ import logging
from typing import Any, Dict, List, Union
import attr
from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr
from synapse.config._base import (
from synapse.types import JsonDict
from ._base import (
Config,
ConfigError,
RoutableShardedWorkerHandlingConfig,
ShardedWorkerHandlingConfig,
)
from synapse.config._util import parse_and_validate_mapping
from synapse.config.server import (
DIRECT_TCP_ERROR,
TCPListenerConfig,
parse_listener_def,
)
from synapse.types import JsonDict
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
"""
_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to the main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration, or optionally (as a
deprecated solution) in every worker's yaml as the various `worker_replication_*`
settings, as defined in the workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""
# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
# Use this to adjust what the main process is known as in the yaml instance_map
MAIN_PROCESS_INSTANCE_MAP_NAME = "main"
logger = logging.getLogger(__name__)
@@ -65,43 +47,13 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
return obj
class ConfigModel(BaseModel):
"""A custom version of Pydantic's BaseModel which
- ignores unknown fields and
- does not allow fields to be overwritten after construction,
but otherwise uses Pydantic's default behaviour.
For now, ignore unknown fields. In the future, we could change this so that unknown
config values cause a ValidationError, provided the error messages are meaningful to
server operators.
Subclassing in this way is recommended by
https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
"""
class Config:
# By default, ignore fields that we don't recognise.
extra = Extra.ignore
# By default, don't allow fields to be reassigned after parsing.
allow_mutation = False
class InstanceLocationConfig(ConfigModel):
@attr.s(auto_attribs=True)
class InstanceLocationConfig:
"""The host and port to talk to an instance via HTTP replication."""
host: StrictStr
port: StrictInt
tls: StrictBool = False
def scheme(self) -> str:
"""Hardcode a retrievable scheme based on self.tls"""
return "https" if self.tls else "http"
def netloc(self) -> str:
"""Nicely format the network location data"""
return f"{self.host}:{self.port}"
host: str
port: int
tls: bool = False
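A behaviour sketch for the pydantic-based InstanceLocationConfig above (values invented):

loc = InstanceLocationConfig(host="main.internal", port=8034, tls=True)
print(loc.scheme(), loc.netloc())  # -> https main.internal:8034
# Unknown fields in the input are silently ignored (Extra.ignore), and because
# allow_mutation is False, reassignment such as `loc.port = 9000` raises.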
@attr.s
@@ -174,15 +126,27 @@ class WorkerConfig(Config):
raise ConfigError("worker_log_config must be a string")
self.worker_log_config = worker_log_config
# The host used to connect to the main synapse
self.worker_replication_host = config.get("worker_replication_host", None)
# The port on the main synapse for TCP replication
if "worker_replication_port" in config:
raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))
# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")
# The tls mode on the main synapse for HTTP replication endpoint.
# For backward compatibility this defaults to False.
self.worker_replication_http_tls = config.get(
"worker_replication_http_tls", False
)
# The shared secret used for authentication when connecting to the main synapse.
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME
self.instance_name = self.worker_name or "master"
# FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
@@ -197,7 +161,7 @@ class WorkerConfig(Config):
manhole = config.get("worker_manhole")
if manhole:
self.worker_listeners.append(
TCPListenerConfig(
ListenerConfig(
port=manhole,
bind_addresses=["127.0.0.1"],
type="manhole",
@@ -216,55 +180,10 @@ class WorkerConfig(Config):
)
# A map from instance name to host/port of their HTTP replication endpoint.
# Check if the main process is declared. Inject it into the map if it's not,
# based first on if a 'main' block is declared then on 'worker_replication_*'
# data. If both are available, default to instance_map. The main process
# itself doesn't need this data as it would never have to talk to itself.
instance_map: Dict[str, Any] = config.get("instance_map", {})
if instance_map and self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
# The host used to connect to the main synapse
main_host = config.get("worker_replication_host", None)
# The port on the main synapse for HTTP replication endpoint
main_port = config.get("worker_replication_http_port")
# The tls mode on the main synapse for HTTP replication endpoint.
# For backward compatibility this defaults to False.
main_tls = config.get("worker_replication_http_tls", False)
            # For now, accept 'main' in the instance_map, but the replication system
            # still expects 'master', so rewrite it to that name until this is changed later.
if MAIN_PROCESS_INSTANCE_MAP_NAME in instance_map:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = instance_map[
MAIN_PROCESS_INSTANCE_MAP_NAME
]
del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]
# This is the backwards compatibility bit that handles the
# worker_replication_* bits using setdefault() to not overwrite anything.
elif main_host is not None and main_port is not None:
instance_map.setdefault(
MAIN_PROCESS_INSTANCE_NAME,
{
"host": main_host,
"port": main_port,
"tls": main_tls,
},
)
else:
# If we've gotten here, it means that the main process is not on the
# instance_map and that not enough worker_replication_* variables
# were declared in the worker's yaml.
raise ConfigError(
_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
% MAIN_PROCESS_INSTANCE_MAP_NAME
)
self.instance_map: Dict[
str, InstanceLocationConfig
] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)
instance_map = config.get("instance_map") or {}
self.instance_map = {
name: InstanceLocationConfig(**c) for name, c in instance_map.items()
}
# Map from type of streams to source, c.f. WriterLocations.
writers = config.get("stream_writers") or {}
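For context, a minimal sketch (assuming attrs and the field names shown in this hunk; it is not the actual Synapse code) of how per-instance host/port/tls entries can be parsed into typed location objects:

import attr

@attr.s(auto_attribs=True)
class InstanceLocationConfig:
    host: str
    port: int
    tls: bool = False

    def scheme(self) -> str:
        # Pick the URL scheme from the tls flag.
        return "https" if self.tls else "http"

# Hypothetical worker config entries.
raw_instance_map = {
    "main": {"host": "localhost", "port": 8008},
    "event_persister1": {"host": "localhost", "port": 8034, "tls": True},
}
instance_map = {
    name: InstanceLocationConfig(**entry) for name, entry in raw_instance_map.items()
}
assert instance_map["event_persister1"].scheme() == "https"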

View File

@@ -51,7 +51,7 @@ def check_event_content_hash(
# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.
hashes = event.get("hashes")
# nb it might be a immutabledict or a dict
# nb it might be a frozendict or a dict
if not isinstance(hashes, collections.abc.Mapping):
raise SynapseError(
400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED

View File

@@ -150,19 +150,18 @@ class Keyring:
def __init__(
self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
):
if key_fetchers is None:
# Always fetch keys from the database.
mutable_key_fetchers: List[KeyFetcher] = [StoreKeyFetcher(hs)]
# Fetch keys from configured trusted key servers, if any exist.
key_servers = hs.config.key.key_servers
if key_servers:
mutable_key_fetchers.append(PerspectivesKeyFetcher(hs))
# Finally, fetch keys from the origin server directly.
mutable_key_fetchers.append(ServerKeyFetcher(hs))
self.clock = hs.get_clock()
self._key_fetchers: Iterable[KeyFetcher] = tuple(mutable_key_fetchers)
else:
self._key_fetchers = key_fetchers
if key_fetchers is None:
key_fetchers = (
# Fetch keys from the database.
StoreKeyFetcher(hs),
# Fetch keys from a configured Perspectives server.
PerspectivesKeyFetcher(hs),
# Fetch keys from the origin server directly.
ServerKeyFetcher(hs),
)
self._key_fetchers = key_fetchers
self._fetch_keys_queue: BatchingQueue[
_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
@@ -173,7 +172,7 @@ class Keyring:
process_batch_callback=self._inner_fetch_key_requests,
)
self._is_mine_server_name = hs.is_mine_server_name
self._hostname = hs.hostname
# build a FetchKeyResult for each of our own keys, to shortcircuit the
# fetcher.
@@ -277,7 +276,7 @@ class Keyring:
# If we are the originating server, short-circuit the key-fetch for any keys
# we already have
if self._is_mine_server_name(verify_request.server_name):
if verify_request.server_name == self._hostname:
for key_id in verify_request.key_ids:
if key_id in self._local_verify_keys:
found_keys[key_id] = self._local_verify_keys[key_id]
@@ -511,7 +510,7 @@ class StoreKeyFetcher(KeyFetcher):
for key_id in queue_value.key_ids
)
res = await self.store.get_server_keys_json(key_ids_to_fetch)
res = await self.store.get_server_verify_keys(key_ids_to_fetch)
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
for (server_name, key_id), key in res.items():
keys.setdefault(server_name, {})[key_id] = key
@@ -523,6 +522,7 @@ class BaseV2KeyFetcher(KeyFetcher):
super().__init__(hs)
self.store = hs.get_datastores().main
self.config = hs.config
async def process_v2_response(
self, from_server: str, response_json: JsonDict, time_added_ms: int
@@ -626,7 +626,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
super().__init__(hs)
self.clock = hs.get_clock()
self.client = hs.get_federation_http_client()
self.key_servers = hs.config.key.key_servers
self.key_servers = self.config.key.key_servers
async def _fetch_keys(
self, keys_to_fetch: List[_FetchKeyRequest]
@@ -721,7 +721,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
)
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
added_keys: Dict[Tuple[str, str], FetchKeyResult] = {}
added_keys: List[Tuple[str, str, FetchKeyResult]] = []
time_now_ms = self.clock.time_msec()
@@ -752,30 +752,12 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
# we continue to process the rest of the response
continue
for key_id, key in processed_response.items():
dict_key = (server_name, key_id)
if dict_key in added_keys:
already_present_key = added_keys[dict_key]
logger.warning(
"Duplicate server keys for %s (%s) from perspective %s (%r, %r)",
server_name,
key_id,
perspective_name,
already_present_key,
key,
)
if already_present_key.valid_until_ts > key.valid_until_ts:
# Favour the entry with the largest valid_until_ts,
# as `old_verify_keys` are also collected from this
# response.
continue
added_keys[dict_key] = key
added_keys.extend(
(server_name, key_id, key) for key_id, key in processed_response.items()
)
keys.setdefault(server_name, {}).update(processed_response)
await self.store.store_server_signature_keys(
await self.store.store_server_verify_keys(
perspective_name, time_now_ms, added_keys
)
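A rough illustration of the duplicate-key handling described in the removed comment above (favour the entry with the largest valid_until_ts), using a simplified stand-in for FetchKeyResult:

from typing import Dict, NamedTuple, Tuple

class FetchKeyResult(NamedTuple):
    verify_key: str
    valid_until_ts: int

def merge_server_keys(entries):
    # entries: iterable of (server_name, key_id, FetchKeyResult); illustrative only.
    added: Dict[Tuple[str, str], FetchKeyResult] = {}
    for server_name, key_id, key in entries:
        existing = added.get((server_name, key_id))
        if existing is not None and existing.valid_until_ts > key.valid_until_ts:
            # Keep the longer-lived key already recorded.
            continue
        added[(server_name, key_id)] = key
    return added

print(merge_server_keys([
    ("example.org", "ed25519:a", FetchKeyResult("key_v1", 1000)),
    ("example.org", "ed25519:a", FetchKeyResult("key_v2", 2000)),
]))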

View File

@@ -455,11 +455,8 @@ def _check_create(event: "EventBase") -> None:
"room appears to have unsupported version %s" % (room_version_prop,),
)
# 1.4 If content has no creator field, reject if the room version requires it.
if (
not event.room_version.msc2175_implicit_room_creator
and EventContentFields.ROOM_CREATOR not in event.content
):
# 1.4 If content has no creator field, reject.
if EventContentFields.ROOM_CREATOR not in event.content:
raise AuthError(403, "Create event lacks a 'creator' property")
@@ -494,11 +491,7 @@ def _is_membership_change_allowed(
key = (EventTypes.Create, "")
create = auth_events.get(key)
if create and event.prev_event_ids()[0] == create.event_id:
if room_version.msc2175_implicit_room_creator:
creator = create.sender
else:
creator = create.content[EventContentFields.ROOM_CREATOR]
if creator == event.state_key:
if create.content["creator"] == event.state_key:
return
target_user_id = event.state_key
@@ -793,7 +786,7 @@ def check_redaction(
"""Check whether the event sender is allowed to redact the target event.
Returns:
True if the sender is allowed to redact the target event if the
True if the the sender is allowed to redact the target event if the
target event was created by them.
False if the sender is allowed to redact the target event with no
further checks.
@@ -1011,14 +1004,10 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> int:
# that.
key = (EventTypes.Create, "")
create_event = auth_events.get(key)
if create_event is not None:
if create_event.room_version.msc2175_implicit_room_creator:
creator = create_event.sender
else:
creator = create_event.content[EventContentFields.ROOM_CREATOR]
if creator == user_id:
return 100
return 0
if create_event is not None and create_event.content["creator"] == user_id:
return 100
else:
return 0
def get_named_level(auth_events: StateMap["EventBase"], name: str, default: int) -> int:
@@ -1054,15 +1043,10 @@ def _verify_third_party_invite(
"""
if "third_party_invite" not in event.content:
return False
third_party_invite = event.content["third_party_invite"]
if not isinstance(third_party_invite, collections.abc.Mapping):
if "signed" not in event.content["third_party_invite"]:
return False
if "signed" not in third_party_invite:
return False
signed = third_party_invite["signed"]
if not isinstance(signed, collections.abc.Mapping):
return False
for key in {"mxid", "token", "signatures"}:
signed = event.content["third_party_invite"]["signed"]
for key in {"mxid", "token"}:
if key not in signed:
return False
@@ -1080,6 +1064,8 @@ def _verify_third_party_invite(
if signed["mxid"] != event.state_key:
return False
if signed["token"] != token:
return False
for public_key_object in get_public_keys(invite_event):
public_key = public_key_object["public_key"]
@@ -1091,9 +1077,7 @@ def _verify_third_party_invite(
verify_key = decode_verify_key_bytes(
key_name, decode_base64(public_key)
)
# verify_signed_json incorrectly states it wants a dict, it
# just needs a mapping.
verify_signed_json(signed, server, verify_key) # type: ignore[arg-type]
verify_signed_json(signed, server, verify_key)
# We got the public key from the invite, so we know that the
# correct server signed the signed bundle.
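A self-contained sketch of the verify_signed_json flow referenced above, calling the signedjson library directly; the mxid and token values are made up:

from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json

signing_key = generate_signing_key("key1")
signed = sign_json(
    {"mxid": "@alice:example.org", "token": "sometoken"},
    "example.org",
    signing_key,
)
# Raises SignatureVerifyException if the signature does not check out.
verify_signed_json(signed, "example.org", get_verify_key(signing_key))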

View File

@@ -198,16 +198,9 @@ class _EventInternalMetadata:
soft_failed: DictProperty[bool] = DictProperty("soft_failed")
proactively_send: DictProperty[bool] = DictProperty("proactively_send")
redacted: DictProperty[bool] = DictProperty("redacted")
historical: DictProperty[bool] = DictProperty("historical")
txn_id: DictProperty[str] = DictProperty("txn_id")
"""The transaction ID, if it was set when the event was created."""
token_id: DictProperty[int] = DictProperty("token_id")
"""The access token ID of the user who sent this event, if any."""
device_id: DictProperty[str] = DictProperty("device_id")
"""The device ID of the user who sent this event, if any."""
historical: DictProperty[bool] = DictProperty("historical")
# XXX: These are set by StreamWorkerStore._set_before_and_after.
# I'm pretty sure that these are never persisted to the database, so shouldn't
@@ -333,6 +326,7 @@ class EventBase(metaclass=abc.ABCMeta):
hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
origin: DictProperty[str] = DictProperty("origin")
origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None)
room_id: DictProperty[str] = DictProperty("room_id")
sender: DictProperty[str] = DictProperty("sender")
# TODO state_key should be Optional[str]. This is generally asserted in Synapse
@@ -352,13 +346,6 @@ class EventBase(metaclass=abc.ABCMeta):
def membership(self) -> str:
return self.content["membership"]
@property
def redacts(self) -> Optional[str]:
"""MSC2176 moved the redacts field into the content."""
if self.room_version.msc2176_redaction_rules:
return self.content.get("redacts")
return self.get("redacts")
def is_state(self) -> bool:
return self.get_state_key() is not None
@@ -475,7 +462,7 @@ class FrozenEvent(EventBase):
# Signatures is a dict of dicts, and this is faster than doing a
# copy.deepcopy
signatures = {
name: dict(sigs.items())
name: {sig_id: sig for sig_id, sig in sigs.items()}
for name, sigs in event_dict.pop("signatures", {}).items()
}
@@ -523,7 +510,7 @@ class FrozenEventV2(EventBase):
# Signatures is a dict of dicts, and this is faster than doing a
# copy.deepcopy
signatures = {
name: dict(sigs.items())
name: {sig_id: sig for sig_id, sig in sigs.items()}
for name, sigs in event_dict.pop("signatures", {}).items()
}

View File

@@ -173,9 +173,7 @@ class EventBuilder:
if self.is_state():
event_dict["state_key"] = self._state_key
# MSC2174 moves the redacts property to the content, it is invalid to
# provide it as a top-level property.
if self._redacts is not None and not self.room_version.msc2176_redaction_rules:
if self._redacts is not None:
event_dict["redacts"] = self._redacts
if self._origin_server_ts is not None:

View File

@@ -12,93 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
TypeVar,
Union,
)
from typing_extensions import ParamSpec
from typing import TYPE_CHECKING, Dict, Iterable, Set, Union
from twisted.internet.defer import CancelledError
from synapse.api.presence import UserPresenceState
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import delay_cancellation
if TYPE_CHECKING:
from synapse.server import HomeServer
GET_USERS_FOR_STATES_CALLBACK = Callable[
[Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
]
# This must either return a set of strings or the constant PresenceRouter.ALL_USERS.
GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
logger = logging.getLogger(__name__)
P = ParamSpec("P")
R = TypeVar("R")
def load_legacy_presence_router(hs: "HomeServer") -> None:
"""Wrapper that loads a presence router module configured using the old
configuration, and registers the hooks they implement.
"""
if hs.config.server.presence_router_module_class is None:
return
module = hs.config.server.presence_router_module_class
config = hs.config.server.presence_router_config
api = hs.get_module_api()
presence_router = module(config=config, module_api=api)
# The known hooks. If a module implements a method which name appears in this set,
# we'll want to register it.
presence_router_methods = {
"get_users_for_states",
"get_interested_users",
}
# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(
f: Optional[Callable[P, R]]
) -> Optional[Callable[P, Awaitable[R]]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None
return maybe_awaitable(f(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks: Dict[str, Optional[Callable[..., Any]]] = {
hook: async_wrapper(getattr(presence_router, hook, None))
for hook in presence_router_methods
}
api.register_presence_router_callbacks(**hooks)
class PresenceRouter:
"""
A module that the homeserver will call upon to help route user presence updates to
@@ -108,30 +34,7 @@ class PresenceRouter:
ALL_USERS = "ALL"
def __init__(self, hs: "HomeServer"):
# Initially there are no callbacks
self._get_users_for_states_callbacks: List[GET_USERS_FOR_STATES_CALLBACK] = []
self._get_interested_users_callbacks: List[GET_INTERESTED_USERS_CALLBACK] = []
def register_presence_router_callbacks(
self,
get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
) -> None:
# PresenceRouter modules are required to implement both of these methods
# or neither of them as they are assumed to act in a complementary manner
paired_methods = [get_users_for_states, get_interested_users]
if paired_methods.count(None) == 1:
raise RuntimeError(
"PresenceRouter modules must register neither or both of the paired callbacks: "
"[get_users_for_states, get_interested_users]"
)
# Append the methods provided to the lists of callbacks
if get_users_for_states is not None:
self._get_users_for_states_callbacks.append(get_users_for_states)
if get_interested_users is not None:
self._get_interested_users_callbacks.append(get_interested_users)
self._module_api_callbacks = hs.get_module_api_callbacks().presence_router
async def get_users_for_states(
self,
@@ -150,13 +53,13 @@ class PresenceRouter:
"""
# Bail out early if we don't have any callbacks to run.
if len(self._get_users_for_states_callbacks) == 0:
if len(self._module_api_callbacks.get_users_for_states_callbacks) == 0:
# Don't include any extra destinations for presence updates
return {}
users_for_states: Dict[str, Set[UserPresenceState]] = {}
# run all the callbacks for get_users_for_states and combine the results
for callback in self._get_users_for_states_callbacks:
for callback in self._module_api_callbacks.get_users_for_states_callbacks:
try:
# Note: result is an object here, because we don't trust modules to
# return the types they're supposed to.
@@ -206,13 +109,13 @@ class PresenceRouter:
"""
# Bail out early if we don't have any callbacks to run.
if len(self._get_interested_users_callbacks) == 0:
if len(self._module_api_callbacks.get_interested_users_callbacks) == 0:
# Don't report any additional interested users
return set()
interested_users = set()
# run all the callbacks for get_interested_users and combine the results
for callback in self._get_interested_users_callbacks:
for callback in self._module_api_callbacks.get_interested_users_callbacks:
try:
result = await delay_cancellation(callback(user_id))
except CancelledError:
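The load_legacy_presence_router code removed above wraps possibly-synchronous module callbacks so they can always be awaited. A standalone sketch of that pattern, using inspect.isawaitable in place of Synapse's maybe_awaitable helper:

import asyncio
import inspect

def async_wrapper(f):
    if f is None:
        # The module doesn't implement this hook; register nothing.
        return None

    async def run(*args, **kwargs):
        result = f(*args, **kwargs)
        return await result if inspect.isawaitable(result) else result

    return run

def legacy_get_interested_users(user_id):
    # A synchronous legacy callback (hypothetical).
    return {user_id}

wrapped = async_wrapper(legacy_get_interested_users)
print(asyncio.run(wrapped("@alice:example.org")))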

View File

@@ -15,7 +15,7 @@ from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional, Tuple
import attr
from immutabledict import immutabledict
from frozendict import frozendict
from synapse.appservice import ApplicationService
from synapse.events import EventBase
@@ -489,4 +489,4 @@ def _decode_state_dict(
if input is None:
return None
return immutabledict({(etype, state_key): v for etype, state_key, v in input})
return frozendict({(etype, state_key): v for etype, state_key, v in input})

View File

@@ -13,19 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Collection,
List,
Optional,
Tuple,
Union,
)
from typing import TYPE_CHECKING, Collection, Optional, Tuple, Union
# `Literal` appears with Python 3.8.
from typing_extensions import Literal
@@ -37,7 +26,7 @@ from synapse.media._base import FileInfo
from synapse.media.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import JsonDict, RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import delay_cancellation
from synapse.util.metrics import Measure
if TYPE_CHECKING:
@@ -46,337 +35,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[
Union[
str,
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[Union[bool, str]],
]
USER_MAY_JOIN_ROOM_CALLBACK = Callable[
[str, str, bool],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
USER_MAY_INVITE_CALLBACK = Callable[
[str, str, str],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[
[str, str, str, str],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[
[str],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[
[str, RoomAlias],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[
[str, str],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]]
LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
[
Optional[dict],
Optional[str],
Collection[Tuple[str, str]],
],
Awaitable[RegistrationBehaviour],
]
CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[
[
Optional[dict],
Optional[str],
Collection[Tuple[str, str]],
Optional[str],
],
Awaitable[RegistrationBehaviour],
]
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
[ReadableFileWrapper, FileInfo],
Awaitable[
Union[
Literal["NOT_SPAM"],
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, JsonDict],
# Deprecated
bool,
]
],
]
def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
"""Wrapper that loads spam checkers configured using the old configuration, and
registers the spam checker hooks they implement.
"""
spam_checkers: List[Any] = []
api = hs.get_module_api()
for module, config in hs.config.spamchecker.spam_checkers:
# Older spam checkers don't accept the `api` argument, so we
# try and detect support.
spam_args = inspect.getfullargspec(module)
if "api" in spam_args.args:
spam_checkers.append(module(config=config, api=api))
else:
spam_checkers.append(module(config=config))
# The known spam checker hooks. If a spam checker module implements a method
# which name appears in this set, we'll want to register it.
spam_checker_methods = {
"check_event_for_spam",
"user_may_invite",
"user_may_create_room",
"user_may_create_room_alias",
"user_may_publish_room",
"check_username_for_spam",
"check_registration_for_spam",
"check_media_file_for_spam",
}
for spam_checker in spam_checkers:
# Methods on legacy spam checkers might not be async, so we wrap them around a
# wrapper that will call maybe_awaitable on the result.
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None
wrapped_func = f
if f.__name__ == "check_registration_for_spam":
checker_args = inspect.signature(f)
if len(checker_args.parameters) == 3:
# Backwards compatibility; some modules might implement a hook that
# doesn't expect a 4th argument. In this case, wrap it in a function
# that gives it only 3 arguments and drops the auth_provider_id on
# the floor.
def wrapper(
email_threepid: Optional[dict],
username: Optional[str],
request_info: Collection[Tuple[str, str]],
auth_provider_id: Optional[str],
) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
# Assertion required because mypy can't prove we won't
# change `f` back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert f is not None
return f(
email_threepid,
username,
request_info,
)
wrapped_func = wrapper
elif len(checker_args.parameters) != 4:
raise RuntimeError(
"Bad signature for callback check_registration_for_spam",
)
def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
assert wrapped_func is not None
return maybe_awaitable(wrapped_func(*args, **kwargs))
return run
# Register the hooks through the module API.
hooks = {
hook: async_wrapper(getattr(spam_checker, hook, None))
for hook in spam_checker_methods
}
api.register_spam_checker_callbacks(**hooks)
class SpamCheckerModuleApiCallbacks:
class SpamChecker:
NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM"
def __init__(self, hs: "synapse.server.HomeServer") -> None:
def __init__(self, hs: "synapse.server.HomeServer"):
self.clock = hs.get_clock()
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
self._should_drop_federated_event_callbacks: List[
SHOULD_DROP_FEDERATED_EVENT_CALLBACK
] = []
self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
self._user_may_send_3pid_invite_callbacks: List[
USER_MAY_SEND_3PID_INVITE_CALLBACK
] = []
self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
self._user_may_create_room_alias_callbacks: List[
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
] = []
self._user_may_publish_room_callbacks: List[USER_MAY_PUBLISH_ROOM_CALLBACK] = []
self._check_username_for_spam_callbacks: List[
CHECK_USERNAME_FOR_SPAM_CALLBACK
] = []
self._check_registration_for_spam_callbacks: List[
CHECK_REGISTRATION_FOR_SPAM_CALLBACK
] = []
self._check_media_file_for_spam_callbacks: List[
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK
] = []
def register_callbacks(
self,
check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
should_drop_federated_event: Optional[
SHOULD_DROP_FEDERATED_EVENT_CALLBACK
] = None,
user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
user_may_create_room_alias: Optional[
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
] = None,
user_may_publish_room: Optional[USER_MAY_PUBLISH_ROOM_CALLBACK] = None,
check_username_for_spam: Optional[CHECK_USERNAME_FOR_SPAM_CALLBACK] = None,
check_registration_for_spam: Optional[
CHECK_REGISTRATION_FOR_SPAM_CALLBACK
] = None,
check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
) -> None:
"""Register callbacks from module for each hook."""
if check_event_for_spam is not None:
self._check_event_for_spam_callbacks.append(check_event_for_spam)
if should_drop_federated_event is not None:
self._should_drop_federated_event_callbacks.append(
should_drop_federated_event
)
if user_may_join_room is not None:
self._user_may_join_room_callbacks.append(user_may_join_room)
if user_may_invite is not None:
self._user_may_invite_callbacks.append(user_may_invite)
if user_may_send_3pid_invite is not None:
self._user_may_send_3pid_invite_callbacks.append(
user_may_send_3pid_invite,
)
if user_may_create_room is not None:
self._user_may_create_room_callbacks.append(user_may_create_room)
if user_may_create_room_alias is not None:
self._user_may_create_room_alias_callbacks.append(
user_may_create_room_alias,
)
if user_may_publish_room is not None:
self._user_may_publish_room_callbacks.append(user_may_publish_room)
if check_username_for_spam is not None:
self._check_username_for_spam_callbacks.append(check_username_for_spam)
if check_registration_for_spam is not None:
self._check_registration_for_spam_callbacks.append(
check_registration_for_spam,
)
if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)
self._module_api_callbacks = hs.get_module_api_callbacks().spam_checker
@trace
async def check_event_for_spam(
@@ -400,7 +65,7 @@ class SpamCheckerModuleApiCallbacks:
string should be used as the client-facing error message. This usage is
generally discouraged as it doesn't support internationalization.
"""
for callback in self._check_event_for_spam_callbacks:
for callback in self._module_api_callbacks.check_event_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -455,7 +120,9 @@ class SpamCheckerModuleApiCallbacks:
Returns:
True if the event should be silently dropped
"""
for callback in self._should_drop_federated_event_callbacks:
for (
callback
) in self._module_api_callbacks.should_drop_federated_event_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -479,7 +146,7 @@ class SpamCheckerModuleApiCallbacks:
Returns:
NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise.
"""
for callback in self._user_may_join_room_callbacks:
for callback in self._module_api_callbacks.user_may_join_room_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -520,7 +187,7 @@ class SpamCheckerModuleApiCallbacks:
Returns:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._user_may_invite_callbacks:
for callback in self._module_api_callbacks.user_may_invite_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -567,7 +234,7 @@ class SpamCheckerModuleApiCallbacks:
Returns:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._user_may_send_3pid_invite_callbacks:
for callback in self._module_api_callbacks.user_may_send_3pid_invite_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -604,7 +271,7 @@ class SpamCheckerModuleApiCallbacks:
Args:
userid: The ID of the user attempting to create a room
"""
for callback in self._user_may_create_room_callbacks:
for callback in self._module_api_callbacks.user_may_create_room_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -640,7 +307,7 @@ class SpamCheckerModuleApiCallbacks:
room_alias: The alias to be created
"""
for callback in self._user_may_create_room_alias_callbacks:
for callback in self._module_api_callbacks.user_may_create_room_alias_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -675,7 +342,7 @@ class SpamCheckerModuleApiCallbacks:
userid: The user ID attempting to publish the room
room_id: The ID of the room that would be published
"""
for callback in self._user_may_publish_room_callbacks:
for callback in self._module_api_callbacks.user_may_publish_room_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -716,7 +383,7 @@ class SpamCheckerModuleApiCallbacks:
Returns:
True if the user is spammy.
"""
for callback in self._check_username_for_spam_callbacks:
for callback in self._module_api_callbacks.check_username_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -750,7 +417,9 @@ class SpamCheckerModuleApiCallbacks:
Enum for how the request should be handled
"""
for callback in self._check_registration_for_spam_callbacks:
for (
callback
) in self._module_api_callbacks.check_registration_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
@@ -793,7 +462,7 @@ class SpamCheckerModuleApiCallbacks:
file_info: Metadata about the file.
"""
for callback in self._check_media_file_for_spam_callbacks:
for callback in self._module_api_callbacks.check_media_file_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
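The hooks in this file all follow the same shape: iterate over the registered callbacks and stop at the first one that objects. A minimal, hypothetical version of that loop:

import asyncio

NOT_SPAM = "NOT_SPAM"

async def check_event_for_spam(callbacks, event):
    for callback in callbacks:
        result = await callback(event)
        if result != NOT_SPAM:
            # First objection wins; remaining callbacks are skipped.
            return result
    return NOT_SPAM

async def block_bad_words(event):
    # An illustrative module callback, not a real spam checker.
    return "M_FORBIDDEN" if "badword" in event.get("body", "") else NOT_SPAM

print(asyncio.run(check_event_for_spam([block_bad_words], {"body": "hello"})))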

View File

@@ -0,0 +1,400 @@
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Optional, Tuple
from twisted.internet.defer import CancelledError
from synapse.api.errors import ModuleFailedException, SynapseError
from synapse.events import EventBase
from synapse.events.snapshot import UnpersistedEventContextBase
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import delay_cancellation
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ThirdPartyEventRules:
"""Allows server admins to provide a Python module implementing an extra
set of rules to apply when processing events.
This is designed to help admins of closed federations with enforcing custom
behaviours.
"""
def __init__(self, hs: "HomeServer"):
self.third_party_rules = None
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._module_api_callbacks = (
hs.get_module_api_callbacks().third_party_event_rules
)
async def check_event_allowed(
self,
event: EventBase,
context: UnpersistedEventContextBase,
) -> Tuple[bool, Optional[dict]]:
"""Check if a provided event should be allowed in the given context.
The module can return:
* True: the event is allowed.
* False: the event is not allowed, and should be rejected with M_FORBIDDEN.
If the event is allowed, the module can also return a dictionary to use as a
replacement for the event.
Args:
event: The event to be checked.
context: The context of the event.
Returns:
The result from the ThirdPartyRules module, as above.
"""
# Bail out early without hitting the store if we don't have any callbacks to run.
if len(self._module_api_callbacks.check_event_allowed_callbacks) == 0:
return True, None
prev_state_ids = await context.get_prev_state_ids()
# Retrieve the state events from the database.
events = await self.store.get_events(prev_state_ids.values())
state_events = {(ev.type, ev.state_key): ev for ev in events.values()}
# Ensure that the event is frozen, to make sure that the module is not tempted
# to try to modify it. Any attempt to modify it at this point will invalidate
# the hashes and signatures.
event.freeze()
for callback in self._module_api_callbacks.check_event_allowed_callbacks:
try:
res, replacement_data = await delay_cancellation(
callback(event, state_events)
)
except CancelledError:
raise
except SynapseError as e:
# FIXME: Being able to throw SynapseErrors is relied upon by
# some modules. PR #10386 accidentally broke this ability.
# That said, we aren't keen on exposing this implementation detail
# to modules and we should one day have a proper way to do what
# is wanted.
# This module callback needs a rework so that hacks such as
# this one are not necessary.
raise e
except Exception:
raise ModuleFailedException(
"Failed to run `check_event_allowed` module API callback"
)
# Return if the event shouldn't be allowed or if the module came up with a
# replacement dict for the event.
if res is False:
return res, None
elif isinstance(replacement_data, dict):
return True, replacement_data
return True, None
async def on_create_room(
self, requester: Requester, config: dict, is_requester_admin: bool
) -> None:
"""Intercept requests to create room to maybe deny it (via an exception) or
update the request config.
Args:
requester
config: The creation config from the client.
is_requester_admin: If the requester is an admin
"""
for callback in self._module_api_callbacks.on_create_room_callbacks:
try:
await callback(requester, config, is_requester_admin)
except Exception as e:
# Don't silence the errors raised by this callback since we expect it to
# raise an exception to deny the creation of the room; instead make sure
# it's a SynapseError we can send to clients.
if not isinstance(e, SynapseError):
e = SynapseError(
403, "Room creation forbidden with these parameters"
)
raise e
async def check_threepid_can_be_invited(
self, medium: str, address: str, room_id: str
) -> bool:
"""Check if a provided 3PID can be invited in the given room.
Args:
medium: The 3PID's medium.
address: The 3PID's address.
room_id: The room we want to invite the threepid to.
Returns:
True if the 3PID can be invited, False if not.
"""
# Bail out early without hitting the store if we don't have any callbacks to run.
if len(self._module_api_callbacks.check_threepid_can_be_invited_callbacks) == 0:
return True
state_events = await self._get_state_map_for_room(room_id)
for (
callback
) in self._module_api_callbacks.check_threepid_can_be_invited_callbacks:
try:
threepid_can_be_invited = await delay_cancellation(
callback(medium, address, state_events)
)
if threepid_can_be_invited is False:
return False
except CancelledError:
raise
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
return True
async def check_visibility_can_be_modified(
self, room_id: str, new_visibility: str
) -> bool:
"""Check if a room is allowed to be published to, or removed from, the public room
list.
Args:
room_id: The ID of the room.
new_visibility: The new visibility state. Either "public" or "private".
Returns:
True if the room's visibility can be modified, False if not.
"""
# Bail out early without hitting the store if we don't have any callback
if (
len(self._module_api_callbacks.check_visibility_can_be_modified_callbacks)
== 0
):
return True
state_events = await self._get_state_map_for_room(room_id)
for (
callback
) in self._module_api_callbacks.check_visibility_can_be_modified_callbacks:
try:
visibility_can_be_modified = await delay_cancellation(
callback(room_id, state_events, new_visibility)
)
if visibility_can_be_modified is False:
return False
except CancelledError:
raise
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
return True
async def on_new_event(self, event_id: str) -> None:
"""Let modules act on events after they've been sent (e.g. auto-accepting
invites, etc.)
Args:
event_id: The ID of the event.
"""
# Bail out early without hitting the store if we don't have any callbacks
if len(self._module_api_callbacks.on_new_event_callbacks) == 0:
return
event = await self.store.get_event(event_id)
state_events = await self._get_state_map_for_room(event.room_id)
for callback in self._module_api_callbacks.on_new_event_callbacks:
try:
await callback(event, state_events)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
"""Intercept requests to shutdown a room. If `False` is returned, the
room must not be shut down.
Args:
requester: The ID of the user requesting the shutdown.
room_id: The ID of the room.
"""
for callback in self._module_api_callbacks.check_can_shutdown_room_callbacks:
try:
can_shutdown_room = await delay_cancellation(callback(user_id, room_id))
if can_shutdown_room is False:
return False
except CancelledError:
raise
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
return True
async def check_can_deactivate_user(
self,
user_id: str,
by_admin: bool,
) -> bool:
"""Intercept requests to deactivate a user. If `False` is returned, the
user should not be deactivated.
Args:
requester
user_id: The ID of the room.
"""
for callback in self._module_api_callbacks.check_can_deactivate_user_callbacks:
try:
can_deactivate_user = await delay_cancellation(
callback(user_id, by_admin)
)
if can_deactivate_user is False:
return False
except CancelledError:
raise
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
return True
async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
"""Given a room ID, return the state events of that room.
Args:
room_id: The ID of the room.
Returns:
A dict mapping (event type, state key) to state event.
"""
return await self._storage_controllers.state.get_current_state(room_id)
async def on_profile_update(
self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool
) -> None:
"""Called after the global profile of a user has been updated. Does not include
per-room profile changes.
Args:
user_id: The user whose profile was changed.
new_profile: The updated profile for the user.
by_admin: Whether the profile update was performed by a server admin.
deactivation: Whether this change was made while deactivating the user.
"""
for callback in self._module_api_callbacks.on_profile_update_callbacks:
try:
await callback(user_id, new_profile, by_admin, deactivation)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def on_user_deactivation_status_changed(
self, user_id: str, deactivated: bool, by_admin: bool
) -> None:
"""Called after a user has been deactivated or reactivated.
Args:
user_id: The deactivated user.
deactivated: Whether the user is now deactivated.
by_admin: Whether the deactivation was performed by a server admin.
"""
for (
callback
) in self._module_api_callbacks.on_user_deactivation_status_changed_callbacks:
try:
await callback(user_id, deactivated, by_admin)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None:
"""Called after a threepid association has been verified and stored.
Note that this callback is called when an association is created on the
local homeserver, not when it's created on an identity server (and then kept track
of so that it can be unbound on the same IS later on).
THIS MODULE CALLBACK METHOD HAS BEEN DEPRECATED. Please use the
`on_add_user_third_party_identifier` callback method instead.
Args:
user_id: the user being associated with the threepid.
medium: the threepid's medium.
address: the threepid's address.
"""
for callback in self._module_api_callbacks.on_threepid_bind_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def on_add_user_third_party_identifier(
self, user_id: str, medium: str, address: str
) -> None:
"""Called when an association between a user's Matrix ID and a third-party ID
(email, phone number) has successfully been registered on the homeserver.
Args:
user_id: The User ID included in the association.
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
"""
for (
callback
) in self._module_api_callbacks.on_add_user_third_party_identifier_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
async def on_remove_user_third_party_identifier(
self, user_id: str, medium: str, address: str
) -> None:
"""Called when an association between a user's Matrix ID and a third-party ID
(email, phone number) has been successfully removed on the homeserver.
This is called *after* any known bindings on identity servers for this
association have been removed.
Args:
user_id: The User ID included in the removed association.
medium: The medium of the third-party ID (email, msisdn).
address: The address of the third-party ID (i.e. an email address).
"""
for (
callback
) in self._module_api_callbacks.on_remove_user_third_party_identifier_callbacks:
try:
await callback(user_id, medium, address)
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
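A sketch of the (allowed, replacement) convention that check_event_allowed documents above; the callback below is a made-up example that rewrites message bodies:

import asyncio

async def check_event_allowed(callbacks, event, state_events):
    for callback in callbacks:
        allowed, replacement = await callback(event, state_events)
        if allowed is False:
            return False, None
        if isinstance(replacement, dict):
            # The module supplied a replacement event dict.
            return True, replacement
    return True, None

async def censor_messages(event, state_events):
    # Hypothetical module callback.
    if event["type"] == "m.room.message":
        return True, {**event, "content": {"body": "[redacted]"}}
    return True, None

event = {"type": "m.room.message", "content": {"body": "hello"}}
print(asyncio.run(check_event_allowed([censor_messages], event, {})))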

View File

@@ -106,6 +106,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
"depth",
"prev_events",
"auth_events",
"origin",
"origin_server_ts",
]
@@ -113,10 +114,6 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
if not room_version.msc2176_redaction_rules:
allowed_keys.extend(["prev_state", "membership"])
# Room versions before MSC3989 kept the origin field.
if not room_version.msc3989_redaction_rules:
allowed_keys.append("origin")
event_type = event_dict["type"]
new_content = {}
@@ -130,16 +127,6 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
add_fields("membership")
if room_version.msc3375_redaction_rules:
add_fields(EventContentFields.AUTHORISING_USER)
if room_version.msc3821_redaction_rules:
# Preserve the signed field under third_party_invite.
third_party_invite = event_dict["content"].get("third_party_invite")
if isinstance(third_party_invite, collections.abc.Mapping):
new_content["third_party_invite"] = {}
if "signed" in third_party_invite:
new_content["third_party_invite"]["signed"] = third_party_invite[
"signed"
]
elif event_type == EventTypes.Create:
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
@@ -181,18 +168,6 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE)
# Protect the rel_type and event_id fields under the m.relates_to field.
if room_version.msc3389_relation_redactions:
relates_to = event_dict["content"].get("m.relates_to")
if isinstance(relates_to, collections.abc.Mapping):
new_relates_to = {}
for field in ("rel_type", "event_id"):
if field in relates_to:
new_relates_to[field] = relates_to[field]
# Only include a non-empty relates_to field.
if new_relates_to:
new_content["m.relates_to"] = new_relates_to
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
allowed_fields["content"] = new_content
@@ -361,7 +336,6 @@ def serialize_event(
time_now_ms: int,
*,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
msc3970_enabled: bool = False,
) -> JsonDict:
"""Serialize event for clients
@@ -369,8 +343,6 @@ def serialize_event(
e
time_now_ms
config: Event serialization config
msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should
include the `transaction_id` in the event's `unsigned` section.
Returns:
The serialized event dictionary.
@@ -383,7 +355,7 @@ def serialize_event(
time_now_ms = int(time_now_ms)
# Should this strip out None's?
d = dict(e.get_dict().items())
d = {k: v for k, v in e.get_dict().items()}
d["event_id"] = e.event_id
@@ -393,43 +365,27 @@ def serialize_event(
if "redacted_because" in e.unsigned:
d["unsigned"]["redacted_because"] = serialize_event(
e.unsigned["redacted_because"],
time_now_ms,
config=config,
msc3970_enabled=msc3970_enabled,
e.unsigned["redacted_because"], time_now_ms, config=config
)
# If we have a txn_id saved in the internal_metadata, we should include it in the
# unsigned section of the event if it was sent by the same session as the one
# requesting the event.
txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
# There is a special case for guests, because they only have one access token
# without associated access_token_id, so we always include the txn_id for events
# they sent.
txn_id = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None and config.requester is not None:
# For the MSC3970 rules to be applied, we *need* to have the device ID in the
# event internal metadata. Since we were not recording them before, if it hasn't
# been recorded, we fallback to the old behaviour.
event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
if msc3970_enabled and event_device_id is not None:
if event_device_id == config.requester.device_id:
d["unsigned"]["transaction_id"] = txn_id
else:
# The pre-MSC3970 behaviour is to only include the transaction ID if the
# event was sent from the same access token. For regular users, we can use
# the access token ID to determine this. For guests, we can't, but since
# each guest only has one access token, we can just check that the event was
# sent by the same user as the one requesting the event.
event_token_id: Optional[int] = getattr(
e.internal_metadata, "token_id", None
event_token_id = getattr(e.internal_metadata, "token_id", None)
if config.requester.user.to_string() == e.sender and (
(
event_token_id is not None
and config.requester.access_token_id is not None
and event_token_id == config.requester.access_token_id
)
if config.requester.user.to_string() == e.sender and (
(
event_token_id is not None
and config.requester.access_token_id is not None
and event_token_id == config.requester.access_token_id
)
or config.requester.is_guest
):
d["unsigned"]["transaction_id"] = txn_id
or config.requester.is_guest
):
d["unsigned"]["transaction_id"] = txn_id
# invite_room_state and knock_room_state are a list of stripped room state events
# that are meant to provide metadata about a room to an invitee/knocker. They are
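The pre-MSC3970 rule described earlier in this hunk boils down to a small predicate; a hypothetical standalone version:

def should_include_transaction_id(
    event_sender, event_token_id, requester_user_id, requester_token_id, requester_is_guest
):
    if requester_user_id != event_sender:
        return False
    same_access_token = (
        event_token_id is not None
        and requester_token_id is not None
        and event_token_id == requester_token_id
    )
    # Guests only ever have one access token, so the sender check is enough.
    return same_access_token or requester_is_guest

print(should_include_transaction_id("@a:hs", 7, "@a:hs", 7, False))  # True
print(should_include_transaction_id("@a:hs", 7, "@b:hs", 7, False))  # False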
@@ -460,9 +416,6 @@ class EventClientSerializer:
clients.
"""
def __init__(self, *, msc3970_enabled: bool = False):
self._msc3970_enabled = msc3970_enabled
def serialize_event(
self,
event: Union[JsonDict, EventBase],
@@ -487,9 +440,7 @@ class EventClientSerializer:
if not isinstance(event, EventBase):
return event
serialized_event = serialize_event(
event, time_now, config=config, msc3970_enabled=self._msc3970_enabled
)
serialized_event = serialize_event(event, time_now, config=config)
# Check if there are any bundled aggregations to include with the event.
if bundle_aggregations:
@@ -547,9 +498,7 @@ class EventClientSerializer:
# `sender` of the edit; however MSC3925 proposes extending it to the whole
# of the edit, which is what we do here.
serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event(
event_aggregations.replace,
time_now,
config=config,
event_aggregations.replace, time_now, config=config
)
# Include any threaded replies to this event.
@@ -618,7 +567,7 @@ PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_and_fixup_power_levels_contents(
old_power_levels: PowerLevelsContent,
) -> Dict[str, Union[int, Dict[str, int]]]:
"""Copy the content of a power_levels event, unfreezing immutabledicts along the way.
"""Copy the content of a power_levels event, unfreezing frozendicts along the way.
We accept as input power level values which are strings, provided they represent an
integer, e.g. `"`100"` instead of 100. Such strings are converted to integers

View File

@@ -12,17 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from typing import Iterable, List, Type, Union, cast
from typing import Iterable, Type, Union, cast
import jsonschema
from pydantic import Field, StrictBool, StrictStr
from synapse.api.constants import (
MAX_ALIAS_LENGTH,
EventContentFields,
EventTypes,
Membership,
)
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions
from synapse.config.homeserver import HomeServerConfig
@@ -34,8 +28,6 @@ from synapse.events.utils import (
validate_canonicaljson,
)
from synapse.federation.federation_server import server_matches_acl_event
from synapse.http.servlet import validate_json_object
from synapse.rest.models import RequestBodyModel
from synapse.types import EventID, JsonDict, RoomID, UserID
@@ -96,27 +88,27 @@ class EventValidator:
Codes.INVALID_PARAM,
)
elif event.type == EventTypes.Retention:
if event.type == EventTypes.Retention:
self._validate_retention(event)
elif event.type == EventTypes.ServerACL:
if event.type == EventTypes.ServerACL:
if not server_matches_acl_event(config.server.server_name, event):
raise SynapseError(
400, "Can't create an ACL event that denies the local server"
)
elif event.type == EventTypes.PowerLevels:
if event.type == EventTypes.PowerLevels:
try:
jsonschema.validate(
instance=event.content,
schema=POWER_LEVELS_SCHEMA,
cls=POWER_LEVELS_VALIDATOR,
cls=plValidator,
)
except jsonschema.ValidationError as e:
if e.path:
# example: "users_default": '0' is not of type 'integer'
# cast safety: path entries can be integers, if we fail to validate
# items in an array. However, the POWER_LEVELS_SCHEMA doesn't expect
# items in an array. However the POWER_LEVELS_SCHEMA doesn't expect
# to see any arrays.
message = (
'"' + cast(str, e.path[-1]) + '": ' + e.message # noqa: B306
@@ -133,15 +125,6 @@ class EventValidator:
errcode=Codes.BAD_JSON,
)
# If the event contains a mentions key, validate it.
if (
EventContentFields.MSC3952_MENTIONS in event.content
and config.experimental.msc3952_intentional_mentions
):
validate_json_object(
event.content[EventContentFields.MSC3952_MENTIONS], Mentions
)
def _validate_retention(self, event: EventBase) -> None:
"""Checks that an event that defines the retention policy for a room respects the
format enforced by the spec.
@@ -270,17 +253,12 @@ POWER_LEVELS_SCHEMA = {
}
class Mentions(RequestBodyModel):
user_ids: List[StrictStr] = Field(default_factory=list)
room: StrictBool = False
# This could return something newer than Draft 7, but that's the current "latest"
# validator.
def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]:
validator = jsonschema.validators.validator_for(schema)
def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
# by default jsonschema does not consider a immutabledict to be an object so
# by default jsonschema does not consider a frozendict to be an object so
# we need to use a custom type checker
# https://python-jsonschema.readthedocs.io/en/stable/validate/?highlight=object#validating-with-additional-types
type_checker = validator.TYPE_CHECKER.redefine(
@@ -290,4 +268,4 @@ def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]:
return jsonschema.validators.extend(validator, type_checker=type_checker)
POWER_LEVELS_VALIDATOR = _create_validator(POWER_LEVELS_SCHEMA)
plValidator = _create_power_level_validator()
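An illustrative, self-contained sketch of redefining jsonschema's "object" type check so an immutable mapping also validates, as the comment above describes (MappingProxyType stands in for frozendict/immutabledict):

from types import MappingProxyType

import jsonschema

schema = {"type": "object", "properties": {"ban": {"type": "integer"}}}
base_validator = jsonschema.validators.validator_for(schema)
type_checker = base_validator.TYPE_CHECKER.redefine(
    "object",
    lambda checker, instance: isinstance(instance, (dict, MappingProxyType)),
)
CustomValidator = jsonschema.validators.extend(base_validator, type_checker=type_checker)

# Passes: the read-only mapping is now treated as a JSON object.
jsonschema.validate(instance=MappingProxyType({"ban": 50}), schema=schema, cls=CustomValidator)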

View File

@@ -49,9 +49,9 @@ class FederationBase:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self._is_mine_server_name = hs.is_mine_server_name
self.server_name = hs.hostname
self.keyring = hs.get_keyring()
self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self.spam_checker = hs.get_spam_checker()
self.store = hs.get_datastores().main
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()
@@ -137,9 +137,9 @@ class FederationBase:
)
return redacted_event
spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu)
spam_check = await self.spam_checker.check_event_for_spam(pdu)
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
if spam_check != self.spam_checker.NOT_SPAM:
logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
log_kv(
{

View File

@@ -61,7 +61,6 @@ from synapse.federation.federation_base import (
event_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.client import is_unknown_endpoint
from synapse.http.types import QueryParams
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
from synapse.types import JsonDict, UserID, get_domain_from_id
@@ -235,10 +234,7 @@ class FederationClient(FederationBase):
)
async def claim_client_keys(
self,
destination: str,
query: Dict[str, Dict[str, Dict[str, int]]],
timeout: Optional[int],
self, destination: str, content: JsonDict, timeout: Optional[int]
) -> JsonDict:
"""Claims one-time keys for a device hosted on a remote server.
@@ -250,50 +246,6 @@ class FederationClient(FederationBase):
The JSON object from the response
"""
sent_queries_counter.labels("client_one_time_keys").inc()
# Convert the query with counts into a stable and unstable query and check
# if attempting to claim more than 1 OTK.
content: Dict[str, Dict[str, str]] = {}
unstable_content: Dict[str, Dict[str, List[str]]] = {}
use_unstable = False
for user_id, one_time_keys in query.items():
for device_id, algorithms in one_time_keys.items():
if any(count > 1 for count in algorithms.values()):
use_unstable = True
if algorithms:
# For the stable query, choose only the first algorithm.
content.setdefault(user_id, {})[device_id] = next(iter(algorithms))
# For the unstable query, repeat each algorithm by count, then
# splat those into chain to get a flattened list of all algorithms.
#
# Converts from {"algo1": 2, "algo2": 2} to ["algo1", "algo1", "algo2"].
unstable_content.setdefault(user_id, {})[device_id] = list(
itertools.chain(
*(
itertools.repeat(algorithm, count)
for algorithm, count in algorithms.items()
)
)
)
if use_unstable:
try:
return await self.transport_layer.claim_client_keys_unstable(
destination, unstable_content, timeout
)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
raise
logger.debug(
"Couldn't claim client keys with the unstable API, falling back to the v1 API"
)
else:
logger.debug("Skipping unstable claim client keys API")
return await self.transport_layer.claim_client_keys(
destination, content, timeout
)
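The count-to-list conversion explained in the comments above can be seen in isolation:

import itertools

algorithms = {"signed_curve25519": 2, "another_algorithm": 1}
flattened = list(
    itertools.chain(
        *(itertools.repeat(algorithm, count) for algorithm, count in algorithms.items())
    )
)
print(flattened)  # ['signed_curve25519', 'signed_curve25519', 'another_algorithm']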
@@ -327,11 +279,15 @@ class FederationClient(FederationBase):
logger.debug("backfill transaction_data=%r", transaction_data)
if not isinstance(transaction_data, dict):
raise InvalidResponseError("Backfill transaction_data is not a dict.")
# TODO we probably want an exception type specific to federation
# client validation.
raise TypeError("Backfill transaction_data is not a dict.")
transaction_data_pdus = transaction_data.get("pdus")
if not isinstance(transaction_data_pdus, list):
raise InvalidResponseError("transaction_data.pdus is not a list.")
# TODO we probably want an exception type specific to federation
# client validation.
raise TypeError("transaction_data.pdus is not a list.")
room_version = await self.store.get_room_version(room_id)
@@ -803,6 +759,43 @@ class FederationClient(FederationBase):
return signed_auth
def _is_unknown_endpoint(
self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
) -> bool:
"""
Returns true if the response was due to an endpoint being unimplemented.
Args:
e: The error response received from the remote server.
synapse_error: The above error converted to a SynapseError. This is
automatically generated if not provided.
"""
if synapse_error is None:
synapse_error = e.to_synapse_error()
# MSC3743 specifies that servers should return a 404 or 405 with an errcode
# of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
# to an unknown method, respectively.
#
# Older versions of servers don't properly handle this. This needs to be
# rather specific as some endpoints truly do return 404 errors.
return (
# 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
(e.code == 404 or e.code == 405)
and (
# Older Dendrites returned a text or empty body.
# Older Conduit returned an empty body.
not e.response
or e.response == b"404 page not found"
# The proper response JSON with M_UNRECOGNIZED errcode.
or synapse_error.errcode == Codes.UNRECOGNIZED
)
) or (
# Older Synapses returned a 400 error.
e.code == 400
and synapse_error.errcode == Codes.UNRECOGNIZED
)
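
For readers unfamiliar with the heuristic above, here is a standalone, simplified sketch of the same check; the error type is invented for illustration, and Synapse's real code works with HttpResponseException/SynapseError instead.

# Simplified, standalone sketch of the "unknown endpoint" heuristic above.
# SimpleHttpError is invented for illustration only.
from dataclasses import dataclass

M_UNRECOGNIZED = "M_UNRECOGNIZED"

@dataclass
class SimpleHttpError:
    code: int
    body: bytes
    errcode: str

def looks_like_unknown_endpoint(e: SimpleHttpError) -> bool:
    # 404 = unknown endpoint, 405 = known endpoint but unknown method (MSC3743);
    # older servers return plain-text/empty bodies or a 400 with M_UNRECOGNIZED.
    return (
        e.code in (404, 405)
        and (not e.body or e.body == b"404 page not found" or e.errcode == M_UNRECOGNIZED)
    ) or (e.code == 400 and e.errcode == M_UNRECOGNIZED)

assert looks_like_unknown_endpoint(SimpleHttpError(404, b"", ""))
assert not looks_like_unknown_endpoint(SimpleHttpError(500, b"", M_UNRECOGNIZED))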
async def _try_destination_list(
self,
description: str,
@@ -854,7 +847,7 @@ class FederationClient(FederationBase):
for destination in destinations:
# We don't want to ask our own server for information we don't have
if self._is_mine_server_name(destination):
if destination == self.server_name:
continue
try:
@@ -894,7 +887,7 @@ class FederationClient(FederationBase):
elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes:
failover = True
elif failover_on_unknown_endpoint and is_unknown_endpoint(
elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
e, synapse_error
):
failover = True
@@ -1230,7 +1223,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
if not self._is_unknown_endpoint(e):
raise
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
@@ -1304,7 +1297,7 @@ class FederationClient(FederationBase):
# fallback to the v1 endpoint if the room uses old-style event IDs.
# Otherwise, consider it a legitimate error and raise.
err = e.to_synapse_error()
if is_unknown_endpoint(e, err):
if self._is_unknown_endpoint(e, err):
if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
raise SynapseError(
400,
@@ -1365,7 +1358,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
if not is_unknown_endpoint(e):
if not self._is_unknown_endpoint(e):
raise
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
@@ -1536,7 +1529,7 @@ class FederationClient(FederationBase):
self, destinations: Iterable[str], room_id: str, event_dict: JsonDict
) -> None:
for destination in destinations:
if self._is_mine_server_name(destination):
if destination == self.server_name:
continue
try:
@@ -1636,7 +1629,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the unstable endpoint. Otherwise, consider it a
# legitimate error and raise.
if not is_unknown_endpoint(e):
if not self._is_unknown_endpoint(e):
raise
logger.debug(

View File

@@ -86,7 +86,7 @@ from synapse.storage.databases.main.lock import Lock
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_server_name
@@ -129,14 +129,12 @@ class FederationServer(FederationBase):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname
self.handler = hs.get_federation_handler()
self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self._spam_checker = hs.get_spam_checker()
self._federation_event_handler = hs.get_federation_event_handler()
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self._room_member_handler = hs.get_room_member_handler()
self._e2e_keys_handler = hs.get_e2e_keys_handler()
self._state_storage_controller = hs.get_storage_controllers().state
@@ -943,7 +941,7 @@ class FederationServer(FederationBase):
authorising_server = get_domain_from_id(
event.content[EventContentFields.AUTHORISING_USER]
)
if not self._is_mine_server_name(authorising_server):
if authorising_server != self.server_name:
raise SynapseError(
400,
f"Cannot authorise request from resident server: {authorising_server}",
@@ -1006,19 +1004,23 @@ class FederationServer(FederationBase):
@trace
async def on_claim_client_keys(
self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool
self, origin: str, content: JsonDict
) -> Dict[str, Any]:
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = await self._e2e_keys_handler.claim_local_one_time_keys(
query, always_include_fallback_keys=always_include_fallback_keys
)
query = []
for user_id, device_keys in content.get("one_time_keys", {}).items():
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for result in results:
for user_id, device_keys in result.items():
for device_id, keys in device_keys.items():
for key_id, key in keys.items():
json_result.setdefault(user_id, {})[device_id] = {key_id: key}
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = await self.store.claim_e2e_one_time_keys(query)
json_result: Dict[str, Dict[str, dict]] = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_str in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json_decoder.decode(json_str)
}
logger.info(
"Claimed one-time-keys: %s",
@@ -1127,7 +1129,7 @@ class FederationServer(FederationBase):
logger.warning("event id %s: %s", pdu.event_id, e)
raise FederationError("ERROR", 403, str(e), affected=pdu.event_id)
if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu):
if await self._spam_checker.should_drop_federated_event(pdu):
logger.warning(
"Unstaged federated event contains spam, dropping %s", pdu.event_id
)
@@ -1172,9 +1174,7 @@ class FederationServer(FederationBase):
origin, event = next
if await self._spam_checker_module_callbacks.should_drop_federated_event(
event
):
if await self._spam_checker.should_drop_federated_event(event):
logger.warning(
"Staged federated event contains spam, dropping %s",
event.event_id,

View File

@@ -68,7 +68,6 @@ class FederationRemoteSendQueue(AbstractFederationSender):
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
self.is_mine_server_name = hs.is_mine_server_name
# We may have multiple federation sender instances, so we need to track
# their positions separately.
@@ -199,7 +198,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
key: Optional[Hashable] = None,
) -> None:
"""As per FederationSender"""
if self.is_mine_server_name(destination):
if destination == self.server_name:
logger.info("Not sending EDU to ourselves")
return
@@ -245,7 +244,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
self.notifier.on_new_replication_data()
def send_device_messages(self, destination: str, immediate: bool = True) -> None:
def send_device_messages(self, destination: str, immediate: bool = False) -> None:
"""As per FederationSender"""
# We don't need to replicate this as it gets sent down a different
# stream.

View File

@@ -11,119 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Federation Sender is responsible for sending Persistent Data Units (PDUs)
and Ephemeral Data Units (EDUs) to other homeservers using
the `/send` Federation API.
## How do PDUs get sent?
The Federation Sender is made aware of new PDUs due to `FederationSender.notify_new_events`.
When the sender is notified about a newly-persisted PDU that originates from this homeserver
and is not an out-of-band event, we pass the PDU to the `_PerDestinationQueue` for each
remote homeserver that is in the room at that point in the DAG.
### Per-Destination Queues
There is one `PerDestinationQueue` per 'destination' homeserver.
The `PerDestinationQueue` maintains the following information about the destination:
- whether the destination is currently in [catch-up mode (see below)](#catch-up-mode);
- a queue of PDUs to be sent to the destination; and
- a queue of EDUs to be sent to the destination (not considered in this section).
Upon a new PDU being enqueued, `attempt_new_transaction` is called to start a new
transaction if there is not already one in progress.
### Transactions and the Transaction Transmission Loop
Each federation HTTP request to the `/send` endpoint is referred to as a 'transaction'.
The body of the HTTP request contains a list of PDUs and EDUs to send to the destination.
The *Transaction Transmission Loop* (`_transaction_transmission_loop`) is responsible
for emptying the queued PDUs (and EDUs) from a `PerDestinationQueue` by sending
them to the destination.
There can only be one transaction in flight for a given destination at any time.
(Other than preventing us from overloading the destination, this also makes it easier to
reason about because we process events sequentially for each destination.
This is useful for *Catch-Up Mode*, described later.)
The loop continues so long as there is anything to send. At each iteration of the loop, we:
- dequeue up to 50 PDUs (and up to 100 EDUs).
- make the `/send` request to the destination homeserver with the dequeued PDUs and EDUs.
- if successful, make note of the fact that we succeeded in transmitting PDUs up to
the given `stream_ordering` of the latest PDU, by advancing the destination's `last_successful_stream_ordering`.
- if unsuccessful, back off from the remote homeserver for some time.
If we have been unsuccessful for too long (when the backoff interval grows to exceed 1 hour),
the in-memory queues are emptied and we enter [*Catch-Up Mode*, described below](#catch-up-mode).
### Catch-Up Mode
When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
(`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
(Only once the catch-up mode has been exited can the regular transaction transmission behaviour
be resumed.)
*Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
connection problems, is responsible for sending PDUs that have been missed by the destination
homeserver. (PDUs can be missed because the `PerDestinationQueue` is volatile — i.e. resets
on startup — and it does not hold PDUs forever if `/send` requests to the destination fail.)
The catch-up mechanism makes use of the `last_successful_stream_ordering` column in the
`destinations` table (which gives the `stream_ordering` of the most recent successfully
sent PDU) and the `stream_ordering` column in the `destination_rooms` table (which gives,
for each room, the `stream_ordering` of the most recent PDU that needs to be sent to this
destination).
Each iteration of the loop pulls out 50 `destination_rooms` entries with the oldest
`stream_ordering`s that are greater than the `last_successful_stream_ordering`.
In other words, from the set of latest PDUs in each room to be sent to the destination,
the 50 oldest such PDUs are pulled out.
These PDUs could, in principle, now be directly sent to the destination. However, as an
optimisation intended to prevent overloading destination homeservers, we instead attempt
to send the latest forward extremities so long as the destination homeserver is still
eligible to receive those.
This reduces load on the destination **in aggregate** because all Synapse homeservers
will behave according to this principle and therefore avoid sending lots of different PDUs
at different points in the DAG to a recovering homeserver.
*This optimisation is not currently valid in rooms which are partial-state on this homeserver,
since we are unable to determine whether the destination homeserver is eligible to receive
the latest forward extremities unless this homeserver sent those PDUs — in this case, we
just send the latest PDUs originating from this server and skip this optimisation.*
Whilst PDUs are sent through this mechanism, the position of `last_successful_stream_ordering`
is advanced as normal.
Once there are no longer any rooms containing outstanding PDUs to be sent to the destination
*that are not already in the `PerDestinationQueue` because they arrived since Catch-Up Mode
was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmission_loop`.
#### A note on failures and back-offs
If a remote server is unreachable over federation, we back off from that server,
with an exponentially-increasing retry interval.
Whilst we don't automatically retry after the interval, we prevent making new attempts
until such time as the back-off has cleared.
Once the back-off is cleared and a new PDU or EDU arrives for transmission, the transmission
loop resumes and empties the queue by making federation requests.
If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent
unbounded growth) and Catch-Up Mode is entered.
It is worth noting that the back-off for a remote server is cleared once an inbound
request from that remote server is received (see `notify_remote_server_up`).
At this point, the transaction transmission loop is also started up, to proactively
send missed PDUs and EDUs to the destination (i.e. you don't need to wait for a new PDU
or EDU, destined for that destination, to be created in order to send out missed PDUs and
EDUs).
"""
import abc
import logging
@@ -362,7 +249,6 @@ class FederationSender(AbstractFederationSender):
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
self.is_mine_server_name = hs.is_mine_server_name
self._presence_router: Optional["PresenceRouter"] = None
self._transaction_manager = TransactionManager(hs)
@@ -767,7 +653,7 @@ class FederationSender(AbstractFederationSender):
domains = [
d
for d in domains_set
if not self.is_mine_server_name(d)
if d != self.server_name
and self._federation_shard_config.should_handle(self._instance_name, d)
]
if not domains:
@@ -833,7 +719,7 @@ class FederationSender(AbstractFederationSender):
assert self.is_mine_id(state.user_id)
for destination in destinations:
if self.is_mine_server_name(destination):
if destination == self.server_name:
continue
if not self._federation_shard_config.should_handle(
self._instance_name, destination
@@ -861,7 +747,7 @@ class FederationSender(AbstractFederationSender):
content: content of EDU
key: clobbering key for this edu
"""
if self.is_mine_server_name(destination):
if destination == self.server_name:
logger.info("Not sending EDU to ourselves")
return
@@ -897,8 +783,8 @@ class FederationSender(AbstractFederationSender):
else:
queue.send_edu(edu)
def send_device_messages(self, destination: str, immediate: bool = True) -> None:
if self.is_mine_server_name(destination):
def send_device_messages(self, destination: str, immediate: bool = False) -> None:
if destination == self.server_name:
logger.warning("Not sending device update to ourselves")
return
@@ -920,7 +806,7 @@ class FederationSender(AbstractFederationSender):
might have come back.
"""
if self.is_mine_server_name(destination):
if destination == self.server_name:
logger.warning("Not waking up ourselves")
return

View File

@@ -497,8 +497,8 @@ class PerDestinationQueue:
#
# Note: `catchup_pdus` will have exactly one PDU per room.
for pdu in catchup_pdus:
# The PDU from the DB will be the newest PDU in the room from
# *this server* that we tried---but were unable---to send to the remote.
# The PDU from the DB will be the last PDU in the room from
# *this server* that wasn't sent to the remote. However, other
# servers may have sent lots of events since then, and we want
# to try and tell the remote only about the *latest* events in
# the room. This is so that it doesn't get inundated by events
@@ -516,11 +516,6 @@ class PerDestinationQueue:
# If the event is in the extremities, then great! We can just
# use that without having to do further checks.
room_catchup_pdus = [pdu]
elif await self._store.is_partial_state_room(pdu.room_id):
# We can't be sure which events the destination should
# see using only partial state. Avoid doing so, and just retry
# sending out the newest PDU the remote is missing from us.
room_catchup_pdus = [pdu]
else:
# If not, fetch the extremities and figure out which we can
# send.
@@ -552,8 +547,6 @@ class PerDestinationQueue:
self._server_name,
new_pdus,
redact=False,
filter_out_erased_senders=True,
filter_out_remote_partial_state_events=True,
)
# If we've filtered out all the extremities, fall back to

View File

@@ -16,7 +16,6 @@
import logging
import urllib
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
@@ -43,24 +42,21 @@ from synapse.api.urls import (
)
from synapse.events import EventBase, make_event_from_dict
from synapse.federation.units import Transaction
from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser
from synapse.http.matrixfederationclient import ByteParser
from synapse.http.types import QueryParams
from synapse.types import JsonDict
from synapse.util import ExceptionBundle
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
logger = logging.getLogger(__name__)
class TransportLayerClient:
"""Sends federation HTTP requests to other servers"""
def __init__(self, hs: "HomeServer"):
def __init__(self, hs):
self.server_name = hs.hostname
self.client = hs.get_federation_http_client()
self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled
self._is_mine_server_name = hs.is_mine_server_name
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
@@ -137,7 +133,7 @@ class TransportLayerClient:
async def backfill(
self, destination: str, room_id: str, event_tuples: Collection[str], limit: int
) -> Optional[Union[JsonDict, list]]:
) -> Optional[JsonDict]:
"""Requests `limit` previous PDUs in a given context before list of
PDUs.
@@ -235,7 +231,7 @@ class TransportLayerClient:
transaction.transaction_id,
)
if self._is_mine_server_name(transaction.destination):
if transaction.destination == self.server_name:
raise RuntimeError("Transport layer cannot send to itself!")
# FIXME: This is only used by the tests. The actual json sent is
@@ -392,7 +388,6 @@ class TransportLayerClient:
# server was just having a momentary blip, the room will be out of
# sync.
ignore_backoff=True,
parser=LegacyJsonSendParser(),
)
async def send_leave_v2(
@@ -450,11 +445,7 @@ class TransportLayerClient:
path = _create_v1_path("/invite/%s/%s", room_id, event_id)
return await self.client.put_json(
destination=destination,
path=path,
data=content,
ignore_backoff=True,
parser=LegacyJsonSendParser(),
destination=destination, path=path, data=content, ignore_backoff=True
)
async def send_invite_v2(
@@ -650,10 +641,10 @@ class TransportLayerClient:
Response:
{
"one_time_keys": {
"device_keys": {
"<user_id>": {
"<device_id>": {
"<algorithm>:<key_id>": <OTK JSON>
"<algorithm>:<key_id>": "<key_base64>"
}
}
}
@@ -669,50 +660,7 @@ class TransportLayerClient:
path = _create_v1_path("/user/keys/claim")
return await self.client.post_json(
destination=destination,
path=path,
data={"one_time_keys": query_content},
timeout=timeout,
)
async def claim_client_keys_unstable(
self, destination: str, query_content: JsonDict, timeout: Optional[int]
) -> JsonDict:
"""Claim one-time keys for a list of devices hosted on a remote server.
Request:
{
"one_time_keys": {
"<user_id>": {
"<device_id>": {"<algorithm>": <count>}
}
}
}
Response:
{
"one_time_keys": {
"<user_id>": {
"<device_id>": {
"<algorithm>:<key_id>": <OTK JSON>
}
}
}
}
Args:
destination: The server to query.
query_content: The user ids to query.
Returns:
A dict containing the one-time keys.
"""
path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/user/keys/claim")
return await self.client.post_json(
destination=destination,
path=path,
data={"one_time_keys": query_content},
timeout=timeout,
destination=destination, path=path, data=query_content, timeout=timeout
)
async def get_missing_events(

View File

@@ -25,7 +25,6 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
FederationUnstableClientKeysClaimServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -109,7 +108,6 @@ class PublicRoomList(BaseFederationServlet):
"""
PATH = "/publicRooms"
CATEGORY = "Federation requests"
def __init__(
self,
@@ -214,7 +212,6 @@ class OpenIdUserInfo(BaseFederationServlet):
"""
PATH = "/openid/userinfo"
CATEGORY = "Federation requests"
REQUIRE_AUTH = False
@@ -299,11 +296,6 @@ def register_servlets(
and not hs.config.experimental.msc3720_enabled
):
continue
if (
servletclass == FederationUnstableClientKeysClaimServlet
and not hs.config.experimental.msc3983_appservice_otk_claims
):
continue
servletclass(
hs=hs,

View File

@@ -57,7 +57,6 @@ class Authenticator:
self._clock = hs.get_clock()
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
self._is_mine_server_name = hs.is_mine_server_name
self.store = hs.get_datastores().main
self.federation_domain_whitelist = (
hs.config.federation.federation_domain_whitelist
@@ -101,9 +100,7 @@ class Authenticator:
json_request["signatures"].setdefault(origin, {})[key] = sig
# if the origin_server sent a destination along it needs to match our own server_name
if destination is not None and not self._is_mine_server_name(
destination
):
if destination is not None and destination != self.server_name:
raise AuthenticationError(
HTTPStatus.UNAUTHORIZED,
"Destination mismatch in auth header",

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import Counter
from typing import (
TYPE_CHECKING,
Dict,
@@ -71,7 +70,6 @@ class BaseFederationServerServlet(BaseFederationServlet):
class FederationSendServlet(BaseFederationServerServlet):
PATH = "/send/(?P<transaction_id>[^/]*)/?"
CATEGORY = "Inbound federation transaction request"
# We ratelimit manually in the handler as we queue up the requests and we
# don't want to fill up the ratelimiter with blocked requests.
@@ -140,7 +138,6 @@ class FederationSendServlet(BaseFederationServerServlet):
class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
CATEGORY = "Federation requests"
# This is when someone asks for a data item for a given server data_id pair.
async def on_GET(
@@ -155,7 +152,6 @@ class FederationEventServlet(BaseFederationServerServlet):
class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"
# This is when someone asks for all data for a given room.
async def on_GET(
@@ -174,7 +170,6 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -192,7 +187,6 @@ class FederationStateIdsServlet(BaseFederationServerServlet):
class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -231,7 +225,6 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
"""
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -253,7 +246,6 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
CATEGORY = "Federation requests"
# This is when we receive a server-server Query
async def on_GET(
@@ -270,7 +262,6 @@ class FederationQueryServlet(BaseFederationServerServlet):
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -306,7 +297,6 @@ class FederationMakeJoinServlet(BaseFederationServerServlet):
class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -322,7 +312,6 @@ class FederationMakeLeaveServlet(BaseFederationServerServlet):
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -338,7 +327,6 @@ class FederationV1SendLeaveServlet(BaseFederationServerServlet):
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
PREFIX = FEDERATION_V2_PREFIX
@@ -356,7 +344,6 @@ class FederationV2SendLeaveServlet(BaseFederationServerServlet):
class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -379,7 +366,6 @@ class FederationMakeKnockServlet(BaseFederationServerServlet):
class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -395,7 +381,6 @@ class FederationV1SendKnockServlet(BaseFederationServerServlet):
class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -410,7 +395,6 @@ class FederationEventAuthServlet(BaseFederationServerServlet):
class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -428,7 +412,6 @@ class FederationV1SendJoinServlet(BaseFederationServerServlet):
class FederationV2SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
PREFIX = FEDERATION_V2_PREFIX
@@ -472,7 +455,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -497,7 +479,6 @@ class FederationV1InviteServlet(BaseFederationServerServlet):
class FederationV2InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
CATEGORY = "Federation requests"
PREFIX = FEDERATION_V2_PREFIX
@@ -534,7 +515,6 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -549,7 +529,6 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
CATEGORY = "Federation requests"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
@@ -559,7 +538,6 @@ class FederationClientKeysQueryServlet(BaseFederationServerServlet):
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -573,54 +551,16 @@ class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
CATEGORY = "Federation requests"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
# Generate a count for each algorithm, which is hard-coded to 1.
key_query: List[Tuple[str, str, str, int]] = []
for user_id, device_keys in content.get("one_time_keys", {}).items():
for device_id, algorithm in device_keys.items():
key_query.append((user_id, device_id, algorithm, 1))
response = await self.handler.on_claim_client_keys(
key_query, always_include_fallback_keys=False
)
return 200, response
class FederationUnstableClientKeysClaimServlet(BaseFederationServerServlet):
"""
Identical to the stable endpoint (FederationClientKeysClaimServlet) except
it allows for querying for multiple OTKs at once and always includes fallback
keys in the response.
"""
PREFIX = FEDERATION_UNSTABLE_PREFIX
PATH = "/user/keys/claim"
CATEGORY = "Federation requests"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
# Generate a count for each algorithm.
key_query: List[Tuple[str, str, str, int]] = []
for user_id, device_keys in content.get("one_time_keys", {}).items():
for device_id, algorithms in device_keys.items():
counts = Counter(algorithms)
for algorithm, count in counts.items():
key_query.append((user_id, device_id, algorithm, count))
response = await self.handler.on_claim_client_keys(
key_query, always_include_fallback_keys=True
)
response = await self.handler.on_claim_client_keys(origin, content)
return 200, response
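
For clarity, the per-algorithm counting used by the unstable servlet above can be sketched on its own; the function name here is hypothetical, and the request shape follows the unstable transport docstring elsewhere in this diff.

# Sketch (hypothetical name) of the Counter-based counting used above.
from collections import Counter
from typing import List, Tuple

def build_key_query(one_time_keys: dict) -> List[Tuple[str, str, str, int]]:
    key_query: List[Tuple[str, str, str, int]] = []
    for user_id, device_keys in one_time_keys.items():
        for device_id, algorithms in device_keys.items():
            # Counter accepts either {"alg": count} mappings or repeated lists.
            for algorithm, count in Counter(algorithms).items():
                key_query.append((user_id, device_id, algorithm, count))
    return key_query

assert build_key_query({"@a:hs": {"DEV": {"signed_curve25519": 2}}}) == [
    ("@a:hs", "DEV", "signed_curve25519", 2)
]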
class FederationGetMissingEventsServlet(BaseFederationServerServlet):
PATH = "/get_missing_events/(?P<room_id>[^/]*)"
CATEGORY = "Federation requests"
async def on_POST(
self,
@@ -646,7 +586,6 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet):
class On3pidBindServlet(BaseFederationServerServlet):
PATH = "/3pid/onbind"
CATEGORY = "Federation requests"
REQUIRE_AUTH = False
@@ -679,7 +618,6 @@ class On3pidBindServlet(BaseFederationServerServlet):
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
CATEGORY = "Federation requests"
REQUIRE_AUTH = False
@@ -702,7 +640,6 @@ class FederationVersionServlet(BaseFederationServlet):
class FederationRoomHierarchyServlet(BaseFederationServlet):
PATH = "/hierarchy/(?P<room_id>[^/]*)"
CATEGORY = "Federation requests"
def __init__(
self,
@@ -735,7 +672,6 @@ class RoomComplexityServlet(BaseFederationServlet):
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
CATEGORY = "Federation requests (unstable)"
def __init__(
self,
@@ -821,7 +757,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationClientKeysQueryServlet,
FederationUserDevicesQueryServlet,
FederationClientKeysClaimServlet,
FederationUnstableClientKeysClaimServlet,
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
FederationVersionServlet,

Some files were not shown because too many files have changed in this diff.