
Compare commits


1 Commit

Author | SHA1 | Message | Date
Andrew Morgan | 1e64f9c255 | Add support for devenv developer environments | 2023-01-18 15:36:35 +00:00
265 changed files with 4313 additions and 8036 deletions

View File

@@ -50,16 +50,7 @@ def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
check_is_abi3_compatible(wheel_file)
- # HACK: it seems that some older versions of pip will consider a wheel marked
- # as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
- # here; there are some clues in
- # https://github.com/pantsbuild/pants/pull/12857
- # https://github.com/pypa/pip/issues/9138
- # https://github.com/pypa/packaging/pull/319
- # Empirically this seems to work, note that macOS 11 and 10.16 are the same,
- # both versions are valid for backwards compatibility.
- platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
- abi3_tag = Tag(tag.interpreter, "abi3", platform)
+ abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
dirname = os.path.dirname(wheel_file)
new_wheel_file = os.path.join(
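
The hunk above removes a macOS wheel-tag workaround. As a minimal sketch (assuming `Tag` here is `packaging.tags.Tag`, and eliding the wheel-renaming around it), the removed logic amounts to:

```python
# Sketch of the removed hack: macOS 11 and 10.16 identify the same release,
# so rewriting the platform tag keeps abi3 wheels installable on older pips.
from packaging.tags import Tag

def downgrade_macos_tag(tag: Tag) -> Tag:
    platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
    return Tag(tag.interpreter, "abi3", platform)

print(downgrade_macos_tag(Tag("cp37", "abi3", "macosx_11_0_x86_64")))
# -> cp37-abi3-macosx_10_16_x86_64
```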

View File

@@ -1,23 +0,0 @@
#! /usr/bin/env python
import sys
if sys.version_info < (3, 11):
raise RuntimeError("Requires at least Python 3.11, to import tomllib")
import tomllib
with open("poetry.lock", "rb") as f:
lockfile = tomllib.load(f)
try:
lock_version = lockfile["metadata"]["lock-version"]
assert lock_version == "2.0"
except Exception:
print(
"""\
Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
https://matrix-org.github.io/synapse/develop/development/dependencies.html
"""
)
raise

View File

@@ -53,7 +53,7 @@ with open('pyproject.toml', 'w') as f:
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
- pip install poetry==1.3.2
+ pip install poetry==1.2.0
poetry lock
echo "::group::Patched pyproject.toml"

View File

@@ -23,9 +23,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-dat
--output-directory /tmp/export_data
# Test that the output directory exists and contains the rooms directory
- dir_r="/tmp/export_data/rooms"
- dir_u="/tmp/export_data/user_data"
- if [ -d "$dir_r" ] && [ -d "$dir_u" ]; then
+ dir="/tmp/export_data/rooms"
+ if [ -d "$dir" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a sqlite database."
@@ -44,9 +43,8 @@ poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-d
--output-directory /tmp/export_data2
# Test that the output directory exists and contains the rooms directory
- dir_r2="/tmp/export_data2/rooms"
- dir_u2="/tmp/export_data2/user_data"
- if [ -d "$dir_r2" ] && [ -d "$dir_u2" ]; then
+ dir2="/tmp/export_data2/rooms"
+ if [ -d "$dir2" ]; then
echo "Command successful, this test passes"
else
echo "No output directories found, the command fails against a postgres database."

View File

@@ -48,7 +48,7 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
- uses: docker/build-push-action@v4
+ uses: docker/build-push-action@v3
with:
push: true
labels: "gitsha1=${{ github.sha }}"

View File

@@ -58,7 +58,7 @@ jobs:
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
+ uses: peaceiris/actions-gh-pages@64b46b4226a4a12da2239ba3ea5aa73e3163c75b # v3.9.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book

View File

@@ -27,7 +27,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -37,7 +37,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.3.2"
poetry-version: "1.2.0"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -61,7 +61,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -134,7 +134,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2

View File

@@ -127,7 +127,7 @@ jobs:
python-version: "3.x"
- name: Install cibuildwheel
- run: python -m pip install cibuildwheel==2.9.0
+ run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
@@ -148,7 +148,7 @@ jobs:
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
- CIBW_TEST_SKIP: pp3*-* *i686* *musl*
+ CIBW_TEST_SKIP: pp3{7,9}-* *i686* *musl*
# Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
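
The skip selector changes between matching every PyPy build (`pp3*-*`) and only PyPy 3.7/3.9 (`pp3{7,9}-*`); cibuildwheel expands the `{7,9}` alternation itself. A rough illustration of the difference, expanding the braces by hand since Python's `fnmatch` has no brace syntax:

```python
from fnmatch import fnmatch

builds = ["pp37-manylinux_x86_64", "pp39-manylinux_x86_64", "pp310-manylinux_x86_64"]
print([b for b in builds if fnmatch(b, "pp3*-*")])                          # all three
print([b for b in builds if fnmatch(b, "pp37-*") or fnmatch(b, "pp39-*")])  # pp37 and pp39 only
```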

View File

@@ -33,10 +33,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- - uses: matrix-org/setup-python-poetry@v1
+ - uses: actions/setup-python@v4
with:
python-version: "3.x"
- poetry-version: "1.3.2"
+ - uses: matrix-org/setup-python-poetry@v1
+ with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
@@ -51,15 +52,6 @@ jobs:
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
- check-lockfile:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
- with:
- python-version: "3.x"
- - run: .ci/scripts/check_lockfile.py
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2"
with:
@@ -96,7 +88,6 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
with:
- poetry-version: "1.3.2"
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
@@ -112,7 +103,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
components: clippy
@@ -134,7 +125,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: nightly-2022-12-01
components: clippy
@@ -154,7 +145,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
components: rustfmt
@@ -172,7 +163,6 @@ jobs:
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- - check-lockfile
- lint-clippy
- lint-rustfmt
runs-on: ubuntu-latest
@@ -221,7 +211,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -229,7 +219,6 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
- poetry-version: "1.3.2"
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
@@ -266,7 +255,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -305,7 +294,6 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: '3.7'
- poetry-version: "1.3.2"
extras: "all test"
- run: poetry run trial -j6 tests
@@ -340,7 +328,6 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
- poetry-version: "1.3.2"
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
@@ -386,7 +373,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -432,7 +419,6 @@ jobs:
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
- poetry-version: "1.3.2"
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
@@ -484,7 +470,6 @@ jobs:
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
- poetry-version: "1.3.2"
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
@@ -531,7 +516,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -541,11 +526,8 @@ jobs:
- run: |
set -o pipefail
- COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
+ POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
- env:
- POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
- WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
name: Run Complement Tests
cargo-test:
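
The expression moved inline above, `${{ cond && 1 || '' }}`, is the GitHub Actions idiom for a conditional value: `1` when the condition holds, the empty string otherwise. In Python terms (a sketch of the semantics only, not of any workflow tooling):

```python
# "cond && 1 || ''" yields 1 when cond is true, "" otherwise.
def actions_value(cond: bool):
    return 1 if cond else ""

print({"POSTGRES": actions_value(True), "WORKERS": actions_value(False)})
# {'POSTGRES': 1, 'WORKERS': ''}
```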
@@ -562,36 +544,13 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
- run: cargo test
- # We want to ensure that the cargo benchmarks still compile, which requires a
- # nightly compiler.
- cargo-bench:
- if: ${{ needs.changes.outputs.rust == 'true' }}
- runs-on: ubuntu-latest
- needs:
- - linting-done
- - changes
- steps:
- - uses: actions/checkout@v3
- - name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
- with:
- toolchain: nightly-2022-12-01
- - uses: Swatinem/rust-cache@v2
- - run: cargo bench --no-run
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
@@ -603,7 +562,6 @@ jobs:
- portdb
- complement
- cargo-test
- - cargo-bench
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
@@ -615,4 +573,3 @@ jobs:
skippable: |
lint-newsfile
cargo-test
- cargo-bench

View File

@@ -18,7 +18,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -43,7 +43,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -82,7 +82,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -148,7 +148,7 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
- pipx install poetry==1.3.2
+ pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk

.gitignore vendored
View File

@@ -72,3 +72,8 @@ book/
# Don't include users' poetry configs
/poetry.toml
+ # Devenv
+ .devenv*
+ devenv.local.nix

View File

@@ -1,238 +1,3 @@
Synapse 1.77.0 (2023-02-14)
===========================
No significant changes since 1.77.0rc2.
Synapse 1.77.0rc2 (2023-02-10)
==============================
Bugfixes
--------
- Fix bug where retried replication requests would return a failure. Introduced in v1.76.0. ([\#15024](https://github.com/matrix-org/synapse/issues/15024))
Internal Changes
----------------
- Prepare for future database schema changes. ([\#15036](https://github.com/matrix-org/synapse/issues/15036))
Synapse 1.77.0rc1 (2023-02-07)
==============================
Features
--------
- Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958))
- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016))
- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.77/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894))
- Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962))
- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971))
Bugfixes
--------
- Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866))
- Fix a bug introduced in Synapse 1.35.0 where the module API's `send_local_online_presence_to` would fail to send presence updates over federation. ([\#14880](https://github.com/matrix-org/synapse/issues/14880))
- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915))
- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926))
- Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942))
- Fix a bug introduced in Synapse 1.64.0 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944))
- Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947))
- Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976))
- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945))
Internal Changes
----------------
- Add missing type hints. ([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956), [\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007))
- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922))
- Allow running the complement tests suites with the asyncio reactor enabled. ([\#14858](https://github.com/matrix-org/synapse/issues/14858))
- Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970))
- Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916))
- Fix typo in release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920))
- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949), [\#15019](https://github.com/matrix-org/synapse/issues/15019))
- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953))
- Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954))
- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950))
- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965))
- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014))
- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002))
<details><summary>Dependabot updates</summary>
- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968))
- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952))
- Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935))
- Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. ([\#14936](https://github.com/matrix-org/synapse/issues/14936))
- Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937))
- Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938))
- Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939))
- Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993))
- Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994))
- Bump prometheus-client from 0.15.0 to 0.16.0. ([\#14995](https://github.com/matrix-org/synapse/issues/14995))
- Bump anyhow from 1.0.68 to 1.0.69. ([\#14996](https://github.com/matrix-org/synapse/issues/14996))
- Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997))
- Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998))
- Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999))
</details>
Synapse 1.76.0 (2023-01-31)
===========================
The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#faster-joins-are-enabled-by-default) for more details.
The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams).
Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132).
Notes on faster joins
---------------------
The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
- read incoming messages;
- see incoming state changes, e.g. room topic changes; and
- send messages, if the room is unencrypted.
Synapse has to spend more effort to complete the join in the background. Once this finishes, you will be able to
- send messages, if the room is encrypted;
- retrieve room history from before your join, if permitted by the room settings; and
- access the full list of room members.
Improved Documentation
----------------------
- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677))
Synapse 1.76.0rc2 (2023-01-27)
==============================
Bugfixes
--------
- Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. ([\#14914](https://github.com/matrix-org/synapse/issues/14914))
Internal Changes
----------------
- Faster joins: Improve performance of looking up partial-state status of rooms. ([\#14917](https://github.com/matrix-org/synapse/issues/14917))
Synapse 1.76.0rc1 (2023-01-25)
==============================
Features
--------
- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111))
- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629))
- Add a dedicated listener configuration for `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747))
- Implement support for [MSC3890](https://github.com/matrix-org/matrix-spec-proposals/pull/3890): Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775))
- Implement experimental support for [MSC3930](https://github.com/matrix-org/matrix-spec-proposals/pull/3930): Push rules for ([MSC3381](https://github.com/matrix-org/matrix-spec-proposals/pull/3381)) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787))
- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811))
- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839))
- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870))
- Faster joins: request partial joins by default. Admins can opt-out of this for the time being---see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905))
Bugfixes
--------
- Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799))
- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconnected early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842))
- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820))
- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864))
- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873))
- Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874))
- Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. ([\#14882](https://github.com/matrix-org/synapse/issues/14882))
- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910))
Updates to the Docker image
---------------------------
- Bump default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875))
Improved Documentation
----------------------
- Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667))
- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773))
- Add missing documentation for `tag` to `listeners` section. ([\#14803](https://github.com/matrix-org/synapse/issues/14803))
- Updated documentation in configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818))
- Add `worker_manhole` to configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824))
- Fix the example config missing the `id` field in [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). ([\#14845](https://github.com/matrix-org/synapse/issues/14845))
- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868))
- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883))
Deprecations and Removals
-------------------------
- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860))
Internal Changes
----------------
- Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749))
- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752))
- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804))
- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807))
- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889))
- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819))
- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821))
- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822))
- Drop unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825))
- Merge the two account data and the two device list replication streams. ([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833))
- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841))
- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843))
- Add check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844))
- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855))
- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872))
- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877))
- Reduce max time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881))
- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912))
- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848))
- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861))
- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862))
- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863))
- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896))
- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897))
- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899))
- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900))
- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901))
Synapse 1.75.0 (2023-01-17)
===========================
No significant changes since 1.75.0rc2.
Synapse 1.75.0rc2 (2023-01-12)
==============================
Bugfixes
--------
- Fix a bug introduced in Synapse 1.75.0rc1 where device lists could be miscalculated with some sync filters. ([\#14810](https://github.com/matrix-org/synapse/issues/14810))
- Fix race where calling `/members` or `/state` with an `at` parameter could fail for newly created rooms, when using multiple workers. ([\#14817](https://github.com/matrix-org/synapse/issues/14817))
Synapse 1.75.0rc1 (2023-01-10)
==============================

Cargo.lock generated
View File

@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.69"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61"
[[package]]
name = "arc-swap"
@@ -294,9 +294,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.7.1"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.92"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a"
checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883"
dependencies = [
"itoa",
"ryu",

View File

@@ -0,0 +1 @@
Add a dedicated listener configuration for `health` endpoint.

changelog.d/14749.misc Normal file
View File

@@ -0,0 +1 @@
Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state.

changelog.d/14773.doc Normal file
View File

@@ -0,0 +1 @@
Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead.

changelog.d/14799.bugfix Normal file
View File

@@ -0,0 +1 @@
Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room.

changelog.d/14803.doc Normal file
View File

@@ -0,0 +1 @@
Add missing documentation for `tag` to `listeners` section.

changelog.d/14807.misc Normal file
View File

@@ -0,0 +1 @@
Add local poetry config files (`poetry.toml`) to `.gitignore`.

View File

@@ -0,0 +1 @@
Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement.

changelog.d/14816.misc Normal file
View File

@@ -0,0 +1 @@
Add missing type hints.

View File

@@ -1,47 +0,0 @@
# `lnav` config for Synapse logs
[lnav](https://lnav.org/) is a log-viewing tool. It is particularly useful when
you need to interleave multiple log files, or for exploring a large log file
with regex filters. The downside is that it is not as ubiquitous as tools like
`less`, `grep`, etc.
This directory contains an `lnav` [log format definition](
https://docs.lnav.org/en/v0.10.1/formats.html#defining-a-new-format
) for Synapse logs as
emitted by Synapse with the default [logging configuration](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#log_config
). It supports lnav 0.10.1 because that's what's packaged by my distribution.
This should allow lnav:
- to interpret timestamps, allowing log interleaving;
- to interpret log severity levels, allowing colouring by log level(!!!);
- to interpret request IDs, allowing you to skip through a specific request; and
- to highlight room, event and user IDs in logs.
See also https://gist.github.com/benje/e2ab750b0a81d11920d83af637d289f7 for a
similar example.
## Example
[![asciicast](https://asciinema.org/a/556133.svg)](https://asciinema.org/a/556133)
## Tips
- `lnav -i /path/to/synapse/checkout/contrib/lnav/synapse-log-format.json`
- `lnav my_synapse_log_file` or `lnav synapse_log_files.*`, etc.
- `lnav --help` for CLI help.
Within lnav itself:
- `?` for help within lnav itself.
- `q` to quit.
- `/` to search a-la `less` and `vim`, then `n` and `N` to continue searching
down and up.
- Use `o` and `O` to skip through logs based on the request ID (`POST-1234`, or
else the value of the [`request_id_header`](
https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=request_id_header#listeners
) header). This may get confused if the same request ID is repeated among
multiple files or process restarts.
- ???
- Profit

View File

@@ -1,67 +0,0 @@
{
"$schema": "https://lnav.org/schemas/format-v1.schema.json",
"synapse": {
"title": "Synapse logs",
"description": "Logs output by Synapse, a Matrix homesever, under its default logging config.",
"regex": {
"log": {
"pattern": ".*(?<timestamp>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}) - (?<logger>.+) - (?<lineno>\\d+) - (?<level>\\w+) - (?<context>.+) - (?<body>.*)"
}
},
"json": false,
"timestamp-field": "timestamp",
"timestamp-format": [
"%Y-%m-%d %H:%M:%S,%L"
],
"level-field": "level",
"body-field": "body",
"opid-field": "context",
"level": {
"critical": "CRITICAL",
"error": "ERROR",
"warning": "WARNING",
"info": "INFO",
"debug": "DEBUG"
},
"sample": [
{
"line": "my-matrix-server-generic-worker-4 | 2023-01-27 09:47:09,818 - synapse.replication.tcp.client - 381 - ERROR - PUT-32992 - Timed out waiting for stream receipts",
"level": "error"
},
{
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"level": "warning"
},
{
"line": "my-matrix-server | 2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main - [database config 'master']: Checking database server",
"level": "info"
},
{
"line": "my-matrix-server | 2023-01-26 15:08:40,447 - synapse.access.http.8008 - 460 - INFO - PUT-74929 - 0.0.0.0 - 8008 - {@alice:example.com} Processed request: 0.011sec/0.000sec (0.000sec, 0.000sec) (0.001sec/0.008sec/3) 2B 200 \"PUT /_matrix/client/r0/user/%40alice%3Atexample.com/account_data/im.vector.setting.breadcrumbs HTTP/1.0\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Element/1.11.20 Chrome/108.0.5359.179 Electron/22.0.3 Safari/537.36\" [0 dbevts]",
"level": "info"
}
],
"highlights": {
"user_id": {
"pattern": "(@|%40)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"room_id": {
"pattern": "(!|%21)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"room_alias": {
"pattern": "(#|%23)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"event_id_v1_v2": {
"pattern": "(\\$|%25)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(?<!:)",
"underline": true
},
"event_id_v3_plus": {
"pattern": "(\\$|%25)([A-Za-z0-9+/_]|-){43}",
"underline": true
}
}
}
}
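
The format definition above hinges on a single log-line regex. A hedged sketch of the same pattern driven from Python's `re` module (Python spells named groups `(?P<name>...)` where the PCRE-style file uses `(?<name>...)`):

```python
import re

LOG_RE = re.compile(
    r".*(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) - (?P<logger>.+) - "
    r"(?P<lineno>\d+) - (?P<level>\w+) - (?P<context>.+) - (?P<body>.*)"
)

line = ("2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main"
        " - [database config 'master']: Checking database server")
m = LOG_RE.match(line)
print(m.group("level"), m.group("context"))  # INFO main
```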

View File

@@ -15,19 +15,19 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
+ worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 808$i
- x_forwarded: true
resources:
- names: [client, federation]
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
- #worker_pid_file: DATADIR/generic_worker$i.pid
EOF
done
```
This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085.
- Customise the script to your needs. Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
+ Customise the script to your needs.
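
A hypothetical Python rendering of the generator loop above, abridged to show the per-worker substitution (the replication settings from the heredoc are omitted):

```python
# Emit one generic-worker config per index: unique name, ports 8081-8085.
for i in range(1, 6):
    print(f"""\
worker_app: synapse.app.generic_worker
worker_name: generic_worker{i}
worker_listeners:
  - type: http
    port: {8080 + i}
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
""")
```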

View File

@@ -8,9 +8,7 @@ It also prints out the example lines for Synapse main configuration file.
Remember to route necessary endpoints directly to a worker associated with it.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
- Hint: Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed.
```sh
#!/bin/bash
@@ -48,11 +46,9 @@ worker_listeners:
- type: http
port: $(expr $HTTP_START_PORT + $i)
- x_forwarded: true
resources:
- names: [client]
- #worker_pid_file: DATADIR/${STREAM_WRITERS[$i]}.pid
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
@@ -95,9 +91,7 @@ Simply run the script to create YAML files in the current folder and print out t
```console
$ ./create_stream_writers.sh
```
- You should receive an output similar to the following:
```console
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.

View File

@@ -31,11 +31,12 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
esac
# Manually install Poetry and export a pip-compatible `requirements.txt`
+ # We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
- pip install poetry==1.3.2
+ pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \

debian/changelog vendored
View File

@@ -1,52 +1,3 @@
matrix-synapse-py3 (1.77.0) stable; urgency=medium
* New Synapse release 1.77.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Feb 2023 12:59:02 +0100
matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium
* New Synapse release 1.77.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 10 Feb 2023 12:44:21 +0000
matrix-synapse-py3 (1.77.0~rc1) stable; urgency=medium
* New Synapse release 1.77.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Feb 2023 13:45:14 +0000
matrix-synapse-py3 (1.76.0) stable; urgency=medium
* New Synapse release 1.76.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 Jan 2023 08:21:47 -0800
matrix-synapse-py3 (1.76.0~rc2) stable; urgency=medium
* New Synapse release 1.76.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 27 Jan 2023 11:17:57 +0000
matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium
* Use Poetry 1.3.2 to manage the bundled virtualenv included with this package.
* New Synapse release 1.76.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 25 Jan 2023 16:21:16 +0000
matrix-synapse-py3 (1.75.0) stable; urgency=medium
* New Synapse release 1.75.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 Jan 2023 11:36:02 +0000
matrix-synapse-py3 (1.75.0~rc2) stable; urgency=medium
* New Synapse release 1.75.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 12 Jan 2023 10:30:15 -0800
matrix-synapse-py3 (1.75.0~rc1) stable; urgency=medium
* New Synapse release 1.75.0rc1.

devenv.lock Normal file
View File

@@ -0,0 +1,177 @@
{
"nodes": {
"devenv": {
"locked": {
"dir": "src/modules",
"lastModified": 1673960114,
"narHash": "sha256-YNCok1a8cy71nP0idJds2Dwn2B1T6zGw9+2H1A0lNa0=",
"owner": "cachix",
"repo": "devenv",
"rev": "0960585a7221e6ede718cc9a2c2eade7ce75c229",
"type": "github"
},
"original": {
"dir": "src/modules",
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1674023045,
"narHash": "sha256-btQC+gTeVLmX9cYl/6Kig7BuDltVWJEh7TrITAc6QjA=",
"owner": "nix-community",
"repo": "fenix",
"rev": "75dbe699bc57323cdef636b82bcfef6028bd1530",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1668681692,
"narHash": "sha256-Ht91NGdewz8IQLtWZ9LCeNXMSXHUss+9COoqu6JLmXU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "009399224d5e398d03b22badca40a37ac85412a1",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1660459072,
"narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1673947312,
"narHash": "sha256-xx/2nRwRy3bXrtry6TtydKpJpqHahjuDB5sFkQ/XNDE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2d38b664b4400335086a713a0036aafaa002c003",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1671271954,
"narHash": "sha256-cSvu+bnvN08sOlTBWbBrKaBHQZq8mvk8bgpt0ZJ2Snc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d513b448cc2a6da2c8803e3c197c9fc7e67b19e3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.05",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1674046351,
"narHash": "sha256-vNErPj4gfO/G1vHuOh5/IbjLaydwePcRlD0fXlnUbmI=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "756cc26afb75b0f8bfec48bbc54a8836a04953fb",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"fenix": "fenix",
"nixpkgs": "nixpkgs",
"pre-commit-hooks": "pre-commit-hooks"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1673979364,
"narHash": "sha256-ecgQENol9XhhcYF+M9B8FMrsWYQ/ZvRsvgEWi8HI6D0=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "3a7271336536f2cd558498755254ae8c0e73baa7",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

devenv.nix Normal file
View File

@@ -0,0 +1,122 @@
{ inputs, pkgs, ... }:
{
# Configure packages to install.
# Search for package names at https://search.nixos.org/packages?channel=unstable
packages = with pkgs; [
# Native dependencies for running Synapse.
icu
libffi
libjpeg
libpqxx
libwebp
libxml2
libxslt
sqlite
# Native dependencies for unit tests (SyTest also requires OpenSSL).
openssl
# Native dependencies for running Complement.
olm
# Development tools.
poetry
];
# Activate (and create if necessary) a poetry virtualenv on startup.
enterShell = ''
. "$(dirname $(poetry run which python))/activate"
'';
# Install dependencies for the additional programming languages
# involved with Synapse development. Python is already available
# from poetry's virtual environment.
#
# * Rust is used for developing and running Synapse.
# * Golang is needed to run the Complement test suite.
# * Perl is needed to run the SyTest test suite.
languages.go.enable = true;
languages.rust.enable = true;
languages.rust.version = "latest";
languages.perl.enable = true;
# Postgres is needed to run Synapse with postgres support and
# to run certain unit tests that require postgres.
services.postgres.enable = true;
# On the first invocation of `devenv up`, create a database for
# Synapse to store data in.
services.postgres.initdbArgs = ["--locale=C" "--encoding=UTF8"];
services.postgres.initialDatabases = [
{ name = "synapse"; }
];
# Redis is needed in order to run Synapse in worker mode.
services.redis.enable = true;
# We wrap `poetry` with a bash script that disables the download
# of binary wheels for certain packages if the user is running
# NixOS. NixOS is special in that you can have multiple versions
# of packages installed at once, including your libc linker!
#
# Some binaries built for Linux expect those to be in a certain
# filepath, but that is not the case on NixOS. In that case, we
# force compiling those binaries locally instead.
scripts.poetry.exec = ''
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
# We are running on NixOS.
#
# Prevent poetry from downloading known problematic,
# dynamically-linked binaries for python dependencies.
POETRY_INSTALLER_NO_BINARY=ruff ${pkgs.poetry}/bin/poetry $@
else
${pkgs.poetry}/bin/poetry $@
fi
'';
# Define the perl modules we require to run SyTest.
#
# This list was compiled by cross-referencing https://metacpan.org/
# with the modules defined in './cpanfile' and then finding the
# corresponding nix packages on https://search.nixos.org/packages.
#
# This was done until `./install-deps.pl --dryrun` produced no output.
env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
DBI
ClassMethodModifiers
CryptEd25519
DataDump
DBDPg
DigestHMAC
DigestSHA1
EmailAddressXS
EmailMIME
EmailSimple # required by Email::Mime
EmailMessageID # required by Email::Mime
EmailMIMEContentType # required by Email::Mime
TextUnidecode # required by Email::Mime
ModuleRuntime # required by Email::Mime
EmailMIMEEncodings # required by Email::Mime
FilePath
FileSlurper
Future
GetoptLong
HTTPMessage
IOAsync
IOAsyncSSL
IOSocketSSL
NetSSLeay
JSON
ListUtilsBy
ScalarListUtils
ModulePluggable
NetAsyncHTTP
MetricsAny # required by Net::Async::HTTP
NetAsyncHTTPServer
StructDumb
URI
YAMLLibYAML
]}";
}
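
A hedged Python rendering of the poetry wrapper's branch logic above (the real script is bash embedded in Nix; `__NIXOS_SET_ENVIRONMENT_DONE` is the marker it tests, and the `poetry` binary path comes from the Nix store):

```python
import os, subprocess, sys

env = dict(os.environ)
if not env.get("__NIXOS_SET_ENVIRONMENT_DONE"):
    # Treated as "running on NixOS" above: build ruff from source rather than
    # downloading a dynamically-linked wheel that expects a standard linker path.
    env["POETRY_INSTALLER_NO_BINARY"] = "ruff"
subprocess.run(["poetry", *sys.argv[1:]], env=env)
```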

devenv.yaml Normal file
View File

@@ -0,0 +1,8 @@
inputs:
nixpkgs:
url: github:NixOS/nixpkgs/nixpkgs-unstable
fenix:
url: github:nix-community/fenix
inputs:
nixpkgs:
follows: nixpkgs

View File

@@ -17,10 +17,16 @@
# Irritatingly, there is no blessed guide on how to distribute an application with its
# poetry-managed environment in a docker image. We have opted for
- # `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs
- # in `poetry export` in the past.
+ # `poetry export | pip install -r /dev/stdin`, but there are known bugs
+ # in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released.
# In case we get bitten by those bugs in the future, the recommendations here might
# be useful:
# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865
# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc
- ARG PYTHON_VERSION=3.11
+ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
@@ -34,16 +40,16 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
# Here we use it to set up a cache for apt (and below for pip), to improve
# rebuild speeds on slow connections.
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry==1.3.2"
pip install --user "poetry==1.2.0"
WORKDIR /synapse
@@ -64,9 +70,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
else \
touch /synapse/requirements.txt; \
fi
###
@@ -76,24 +82,24 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
# install the OS build deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
openssl \
zlib1g-dev \
git \
curl \
libicu-dev \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
# Install rust and ensure its in the PATH
@@ -134,9 +140,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
--mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
fi
###
@@ -151,20 +157,20 @@ LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git
LABEL org.opencontainers.image.licenses='Apache-2.0'
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libicu67 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /install /usr/local
COPY ./docker/start.py /start.py
@@ -175,4 +181,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
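
The healthcheck shells out to curl; a minimal Python equivalent polling the same `/health` endpoint (a sketch assuming the default client port 8008):

```python
import sys
import urllib.request

try:
    with urllib.request.urlopen("http://localhost:8008/health", timeout=5) as resp:
        sys.exit(0 if resp.status == 200 else 1)
except OSError:
    sys.exit(1)
```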

View File

@@ -6,7 +6,7 @@ set -e
echo "Complement Synapse launcher"
echo " Args: $@"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS"
function log {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
@@ -76,17 +76,6 @@ else
fi
if [[ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]]; then
if [[ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]]; then
export SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR="1"
else
export SYNAPSE_ASYNC_IO_REACTOR="1"
fi
else
export SYNAPSE_ASYNC_IO_REACTOR="0"
fi
# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then

View File

@@ -94,16 +94,16 @@ allow_device_name_lookup_over_federation: true
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
msc3706_enabled: true
{% if not workers_in_use %}
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls
msc3381_polls_enabled: true
# Enable deleting device-specific notification settings stored in account data
msc3890_enabled: true
# Enable removing account data support
msc3391_enabled: true
{% endif %}
# Filtering /messages by relation type.
msc3874_enabled: true
# Enable removing account data support
msc3391_enabled: true
server_notices:
system_mxid_localpart: _server

View File

@@ -97,7 +97,6 @@
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
- [Internal Documentation](development/internal_documentation/README.md)
- [Single Sign-On]()
- [SAML](development/saml.md)

View File

@@ -15,7 +15,6 @@ app_service_config_files:
The format of the AS configuration file is as follows:
```yaml
id: <your-AS-id>
url: <base url of AS>
as_token: <token AS will add to requests to HS>
hs_token: <token HS will add to requests to AS>

View File

@@ -67,7 +67,7 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Developing Synapse requires Poetry version 1.3.2 or later.
Synapse requires Poetry version 1.2.0 or later.
Next, open a terminal and install dependencies as follows:
@@ -332,7 +332,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
[here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:
```sh

View File

@@ -2,13 +2,6 @@
This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
# Installing
See the [contributing guide](contributing_guide.md#4-install-the-dependencies).
Developers should use Poetry 1.3.2 or higher. If you encounter problems related
to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version).
# Background
Synapse uses a variety of third-party Python packages to function as a homeserver.
@@ -130,7 +123,7 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
## ...reset my venv to the locked environment?
```shell
poetry install --all-extras --sync
poetry install --extras all --remove-untracked
```
## ...delete everything and start over from scratch?
@@ -190,6 +183,7 @@ Either:
- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
`--extras` and `--optional` flags in particular.
- **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
@@ -202,7 +196,7 @@ poetry remove packagename
```
ought to do the trick. Alternatively, manually update `pyproject.toml` and
`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
`poetry lock --no-update`. Include the updated `pyproject.toml` and poetry.lock`
files in your commit.
## ...update the version range for an existing dependency?
@@ -246,6 +240,9 @@ poetry export --extras all
Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
be required.
## ...build a test wheel?
I usually use
@@ -258,26 +255,12 @@ because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.
## ...handle a Dependabot pull request?
Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
creates a pull request a GitHub Action will run to automatically create a changelog
file. Ensure that:
* the lockfile changes look reasonable;
* the upstream changelog file (linked in the description) doesn't include any
breaking changes;
* continuous integration passes (due to permissions, the GitHub Actions run on
the changelog commit will fail, look at the initial commit of the pull request);
In particular, any updates to the type hints (usually packages which start with `types-`)
should be safe to merge if linting passes.
# Troubleshooting
## Check the version of poetry with `poetry --version`.
The minimum version of poetry supported by Synapse is 1.3.2.
The minimum version of poetry supported by Synapse is 1.2.
It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep

View File

@@ -1,375 +0,0 @@
# How do faster joins work?
This is a work-in-progress set of notes with two goals:
- act as a reference, explaining how Synapse implements faster joins; and
- record the rationale behind our choices.
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.
## Overview: processing events in a partially-joined room
The response to a partial join consists of
- the requested join event `J`,
- a list of the servers in the room (according to the state before `J`),
- a subset of the state of the room before `J`,
- the full auth chain of that state subset.
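Concretely, such a response might look roughly like the following. This is a sketch only: the exact wire format and field names are defined by MSC3706 and the federation spec, not by this document.
```python
# Rough, illustrative shape of a partial /send_join response.
partial_join_response = {
    # the requested join event J
    "event": {"type": "m.room.member", "state_key": "@alice:example.com"},
    # the servers in the room, according to the state before J
    "servers_in_room": ["resident.example.org", "other.example.net"],
    # a subset of the state of the room before J (memberships omitted)
    "state": [{"type": "m.room.create", "state_key": ""}],
    # the full auth chain of that state subset
    "auth_chain": [{"type": "m.room.power_levels", "state_key": ""}],
    # marker that membership events were left out of `state`
    "members_omitted": True,
}
```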
Synapse marks the room as partially joined by adding a row to the database table
`partial_state_rooms`. It also marks the join event `J` as "partially stated",
meaning that we have neither received nor computed the full state before/after
`J`. This is done by adding a row to `partial_state_events`.
<details><summary>DB schema</summary>
```
matrix=> \d partial_state_events
Table "matrix.partial_state_events"
Column │ Type │ Collation │ Nullable │ Default
══════════╪══════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
event_id │ text │ │ not null │
matrix=> \d partial_state_rooms
Table "matrix.partial_state_rooms"
Column │ Type │ Collation │ Nullable │ Default
════════════════════════╪════════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
device_lists_stream_id │ bigint │ │ not null │ 0
join_event_id │ text │ │ │
joined_via │ text │ │ │
matrix=> \d partial_state_rooms_servers
Table "matrix.partial_state_rooms_servers"
Column │ Type │ Collation │ Nullable │ Default
═════════════╪══════╪═══════════╪══════════╪═════════
room_id │ text │ │ not null │
server_name │ text │ │ not null │
```
Indices, foreign-keys and check constraints are omitted for brevity.
</details>
While partially joined to a room, Synapse receives events `E` from remote
homeservers as normal, and can create events at the request of its local users.
However, we run into trouble when we enforce the [checks on an event].
> 1. Is a valid event, otherwise it is dropped. For an event to be valid, it
> must contain a room_id, and it must comply with the event format of that
> room version.
> 2. Passes signature checks, otherwise it is dropped.
> 3. Passes hash checks, otherwise it is redacted before being processed further.
> 4. Passes authorization rules based on the event's auth events, otherwise it
> is rejected.
> 5. **Passes authorization rules based on the state before the event, otherwise
> it is rejected.**
> 6. **Passes authorization rules based on the current state of the room,
> otherwise it is “soft failed”.**
[checks on an event]: https://spec.matrix.org/v1.5/server-server-api/#checks-performed-on-receipt-of-a-pdu
We can enforce checks 1--4 without any problems.
But we cannot enforce checks 5 or 6 with complete certainty, since Synapse does
not know the full state before `E`, nor that of the room.
### Partial state
Instead, we make a best-effort approximation.
While the room is considered partially joined, Synapse tracks the "partial
state" before events.
This works in a similar way to regular state:
- The partial state before `J` is that given to us by the partial join response.
- The partial state before an event `E` is the resolution of the partial states
after each of `E`'s `prev_event`s.
- If `E` is rejected or a message event, the partial state after `E` is the
partial state before `E`.
- Otherwise, the partial state after `E` is the partial state before `E`, plus
`E` itself.
More concisely, partial state propagates just like full state; the only
difference is that we "seed" it with an incomplete initial state.
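As a minimal sketch of the propagation rule (illustrative Python; `resolve` stands in for the real state resolution algorithm, and none of these types are Synapse's own):
```python
from typing import Dict, List, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event type, state key) -> event id

def resolve(states: List[StateMap]) -> StateMap:
    """Stand-in for Matrix state resolution; not the real algorithm."""
    merged: StateMap = {}
    for s in states:
        merged.update(s)
    return merged

def partial_state_before(prev_event_states: List[StateMap]) -> StateMap:
    # the resolution of the partial states after each of E's prev_events
    return resolve(prev_event_states)

def partial_state_after(
    state_before: StateMap,
    event_id: str,
    event_type: str,
    state_key: Optional[str],
    rejected: bool,
) -> StateMap:
    if rejected or state_key is None:
        # rejected events and message events leave the state unchanged
        return state_before
    after = dict(state_before)
    after[(event_type, state_key)] = event_id
    return after
```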
Synapse records that we have only calculated partial state for this event with
a row in `partial_state_events`.
While the room remains partially stated, check 5 on incoming events to that
room becomes:
> 5. Passes authorization rules based on **the resolution between the partial
> state before `E` and `E`'s auth events.** If the event fails to pass
> authorization rules, it is rejected.
Additionally, check 6 is deleted: no soft-failures are enforced.
While partially joined, the current partial state of the room is defined as the
resolution across the partial states after all forward extremities in the room.
_Remark._ Events with partial state are _not_ considered
[outliers](../room-dag-concepts.md#outliers).
### Approximation error
Using partial state means the auth checks can fail in a few different ways[^2].
[^2]: Is this exhaustive?
- We may erroneously accept an incoming event in check 5 based on partial state
when it would have been rejected based on full state, or vice versa.
- This means that an event could erroneously be added to the current partial
state of the room when it would not be present in the full state of the room,
or vice versa.
- Additionally, we may have skipped soft-failing an event that would have been
soft-failed based on full state.
(Note that the discrepancies described in the last two bullets are user-visible.)
This means that we have to be very careful when we want to lookup pieces of room
state in a partially-joined room. Our approximation of the state may be
incorrect or missing. But we can make some educated guesses. If
- our partial state is likely to be correct, or
- the consequences of our partial state being incorrect are minor,
then we proceed as normal, and let the resync process fix up any mistakes (see
below).
When is our partial state likely to be correct?
- It's more accurate the closer we are to the partial join event. (So we should
ideally complete the resync as soon as possible.)
- Non-member events: we will have received them as part of the partial join
response, if they were part of the room state at that point. We may
incorrectly accept or reject updates to that state (at first because we lack
remote membership information; later because of compounding errors), so these
can become incorrect over time.
- Local members' memberships: we are the only ones who can create join and
knock events for our users. We can't be completely confident in the
correctness of bans, invites and kicks from other homeservers, but the resync
process should correct any mistakes.
- Remote members' memberships: we did not receive these in the /send_join
response, so we have essentially no idea if these are correct or not.
In short, we deem it acceptable to trust the partial state for non-membership
and local membership events. For remote membership events, we wait for the
resync to complete, at which point we have the full state of the room and can
proceed as normal.
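In pseudo-Python, that rule of thumb is roughly the following (an illustrative helper, not Synapse's actual code):
```python
from typing import Optional

def can_trust_partial_state(
    event_type: str, state_key: Optional[str], my_server_name: str
) -> bool:
    """Trust everything except remote users' memberships."""
    if event_type != "m.room.member" or state_key is None:
        # non-member state arrived in the partial join response
        return True
    # only trust memberships of our own users; otherwise wait for the resync
    return state_key.endswith(":" + my_server_name)
```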
### Fixing the approximation with a resync
The partial-state approximation is only a temporary affair. In the background,
Synapse begins a "resync" process. This is a continuous loop, starting at the
partial join event and proceeding downwards through the event graph. For each
`E` seen in the room since partial join, Synapse will fetch
- the event ids in the state of the room before `E`, via
[`/state_ids`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1state_idsroomid);
- the event ids in the full auth chain of `E`, included in the `/state_ids`
response; and
- any events from the previous two bullets that Synapse hasn't persisted, via
[`/state`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1stateroomid).
This means Synapse has (or can compute) the full state before `E`, which allows
Synapse to properly authorise or reject `E`. At this point, the event
is considered to have "full state" rather than "partial state". We record this
by removing `E` from the `partial_state_events` table.
\[**TODO:** Does Synapse persist a new state group for the full state
before `E`, or do we alter the (partial-)state group in-place? Are state groups
ever marked as partially-stated? \]
This scheme means it is possible for us to have accepted and sent an event to
clients, only to reject it during the resync. From a client's perspective, the
effect is similar to a retroactive
state change due to state resolution---i.e. a "state reset".[^3]
[^3]: Clients should refresh caches to detect such a change. Rumour has it that
sliding sync will fix this.
When all events since the join `J` have been fully-stated, the room resync
process is complete. We record this by removing the room from
`partial_state_rooms`.
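Pulling the above together, the loop is roughly as follows. This is a heavily simplified sketch: every helper name here is hypothetical, not Synapse's real API.
```python
async def resync_partial_state_room(room_id: str, store, federation) -> None:
    while True:
        event_id = await store.next_partial_state_event(room_id)
        if event_id is None:
            # every event since the join is fully stated: resync complete
            await store.clear_partial_state_room(room_id)
            return
        # /state_ids gives the state before the event plus its full auth chain
        state_ids, auth_chain_ids = await federation.get_state_ids(
            room_id, event_id
        )
        # fetch any of those events we haven't persisted yet, via /state
        await federation.fetch_missing_events(room_id, state_ids + auth_chain_ids)
        # with the full state in hand, properly authorise or reject the event
        await store.mark_event_fully_stated(event_id)
```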
## Faster joins on workers
For the time being, the resync process happens on the master worker.
A new replication stream `un_partial_stated_room` is added. Whenever a resync
completes and a partial-state room becomes fully stated, a new message is sent
into that stream containing the room ID.
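The payload on that stream is minimal; conceptually it is just (an illustrative stand-in, not Synapse's actual class):
```python
from typing import NamedTuple

class UnPartialStatedRoomStreamRow(NamedTuple):
    """One row per room that has just become fully stated."""
    room_id: str
```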
## Notes on specific cases
> **NB.** The notes below are rough. Some of them are hidden under `<details>`
disclosures because they have yet to be implemented in mainline Synapse.
### Creating events during a partial join
When sending out messages during a partial join, we assume our partial state is
accurate and proceed as normal. For this to have any hope of succeeding at all,
our partial state must contain an entry for each of the (type, state key) pairs
[specified by the auth rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules):
- `m.room.create`
- `m.room.join_rules`
- `m.room.power_levels`
- `m.room.third_party_invite`
- `m.room.member`
The first four of these should be present in the state before `J` that is given
to us in the partial join response; only membership events are omitted. In order
for us to consider the user joined, we must have their membership event. That
means the only possible omission is the target's membership in an invite, kick
or ban.
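A sketch of the corresponding sanity check (a hypothetical helper, not Synapse's real code):
```python
from typing import Dict, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event type, state key) -> event id

def have_auth_state_for_sender(partial_state: StateMap, sender: str) -> bool:
    """Do we hold every (type, state_key) pair the auth rules may consult?"""
    required = [
        ("m.room.create", ""),
        ("m.room.join_rules", ""),
        ("m.room.power_levels", ""),
        # the sender's own membership; a third-party invite is only needed
        # when the membership event actually references one
        ("m.room.member", sender),
    ]
    return all(key in partial_state for key in required)
```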
The worst possibility is that we locally invite someone who is banned according to
the full state, because we lack their ban in our current partial state. The rest
of the federation---at least, those who are fully joined---should correctly
enforce the [membership transition constraints](
https://spec.matrix.org/v1.3/client-server-api/#room-membership
). So any erroneous invite should be ignored by fully-joined
homeservers and resolved by the resync for partially-joined homeservers.
More generally, there are two problems we're worrying about here:
- We might create an event that is valid under our partial state, only to later
find out that it is actually invalid according to the full state.
- Or: we might refuse to create an event that is invalid under our partial
state, even though it would be perfectly valid under the full state.
However, we expect such problems to be unlikely in practice, because
- We trust that the room has sensible power levels, e.g. that bad actors with
high power levels are demoted before their ban.
- We trust that the resident server provides us up-to-date power levels, join
rules, etc.
- State changes in rooms are relatively infrequent, and the resync period is
relatively quick.
#### Sending out the event over federation
**TODO:** needs prose fleshing out.
Normally: send out in a fed txn to all HSes in the room.
We only know that some HSes were in the room at some point. Wat do.
Send it out to the list of servers from the first join.
**TODO** what do we do here if we have full state?
If the prev event was created by us, we can risk sending it to the wrong HS. (Motivation: privacy concern of the content. Not such a big deal for a public room or an encrypted room. But non-encrypted invite-only...)
But don't want to send out sensitive data in other HS's events in this way.
Suppose we discover after resync that we shouldn't have sent out one of our events (not a prev_event) to a target HS. Not much we can do.
What about if we didn't send them an event but should've?
E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them.
Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left.
Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?)
- Don't do this currently.
- If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages
- Gap: rooms which are infrequently used and take a long time to resync.
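To summarise the current stop-gap in code (a minimal, illustrative sketch):
```python
from typing import Iterable, Set

def destinations_while_partially_joined(
    servers_from_send_join: Iterable[str], my_server_name: str
) -> Set[str]:
    # while partially joined we only know the server list returned by the
    # first /send_join; send transactions there, excluding ourselves
    return {s for s in servers_from_send_join if s != my_server_name}
```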
### Joining after a partial join
**NB.** Not yet implemented.
<details>
**TODO:** needs prose fleshing out. Liaise with Matthieu. Explain why /send_join
(Rich was surprised we didn't just create it locally. Answer: to try and avoid
a join which then gets rejected after resync.)
We don't know for sure that any join we create would be accepted.
E.g. the joined user might have been banned; the join rules might have changed in a way that we didn't realise... some way in which the partial state was mistaken.
Instead, do another partial make-join/send-join handshake to confirm that the join works.
- Probably going to get a bunch of duplicate state events and auth events.... but the point of partial joins is that these should be small. Many are already persisted = good.
- What if the second send_join response includes a different list of resident HSes? Could ignore it.
- Could even have a special flag that says "just make me a join", i.e. don't bother giving me state or servers in room. Deffo want the auth chain tho.
- SQ: wrt device lists it's a lot safer to ignore it!!!!!
- What if the state at the second join is inconsistent with what we have? Ignore it?
</details>
### Leaving (and kicks and bans) after a partial join
**NB.** Not yet implemented.
<details>
When you're fully joined to a room, to have `U` leave a room their homeserver
needs to
- create a new leave event for `U` which will be accepted by other homeservers,
and
- send that event `U` out to the homeservers in the federation.
When is a leave event accepted? See
[v10 auth rules](https://spec.matrix.org/v1.5/rooms/v10/#authorization-rules):
> 4. If type is m.room.member: [...]
>
> 5. If membership is leave:
>
> 1. If the sender matches state_key, allow if and only if that user's current membership state is invite, join, or knock.
> 2. [...]
I think this means that (well-formed!) self-leaves are governed entirely by
4.5.1. This means that if we correctly calculate state which says that `U` is
invited, joined or knocked and include it in the leave's auth events, our event
is accepted by checks 4 and 5 on incoming events.
> 4. Passes authorization rules based on the event's auth events, otherwise
> it is rejected.
> 5. Passes authorization rules based on the state before the event, otherwise
> it is rejected.
The only way to fail check 6 is if the receiving server's current state of the
room says that `U` is banned, has left, or has no membership event. But this is
fine: the receiving server already thinks that `U` isn't in the room.
> 6. Passes authorization rules based on the current state of the room,
> otherwise it is “soft failed”.
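In code, the 4.5.1 acceptance condition above amounts to (a minimal sketch):
```python
def self_leave_allowed(current_membership: str) -> bool:
    # a well-formed self-leave is allowed iff the sender's current
    # membership is invite, join or knock (auth rule 4.5.1)
    return current_membership in ("invite", "join", "knock")
```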
For the second point (publishing the leave event), the best thing we can do is
to publish to all HSes we know to be currently in the room. If they miss that
event, they might send us traffic in the room that we don't care about. This is
a problem with leaving after a "full" join; we don't seek to fix this with
partial joins.
(With that said: there's nothing machine-readable in the /send response. I don't
think we can deduce "destination has left the room" from a failure to /send an
event into that room?)
#### Can we still do this during a partial join?
We can create leave events and can choose what gets included in our auth events,
so we can be sure that we pass check 4 on incoming events. For check 5, we might
have an incorrect view of the state before an event.
The only way we might erroneously think a leave is valid is if
- the partial state before the leave has `U` joined, invited or knocked, but
- the full state before the leave has `U` banned, left or not present,
in which case the leave doesn't make anything worse: other HSes already consider
us as not in the room, and will continue to do so after seeing the leave.
The remaining obstacle is then: can we safely broadcast the leave event? We may
miss servers or incorrectly think that a server is in the room. Or the
destination server may be offline and miss the transaction containing our leave
event. This should self-heal when they see an event whose `prev_events` descends
from our leave.
Another option we considered was to use federation `/send_leave` to ask a
fully-joined server to send out the event on our behalf. But that introduces
complexity without much benefit. Besides, as Rich put it,
> sending out leaves is pretty best-effort currently
so this is probably good enough as-is.
#### Cleanup after the last leave
**TODO**: what cleanup is necessary? Is it all just nice-to-have to save unused
work?
</details>

View File

@@ -17,7 +17,6 @@ worker_listeners:
#
#- type: http
# port: 8035
# x_forwarded: true
# resources:
# - names: [client]

View File

@@ -5,10 +5,11 @@ worker_name: generic_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 8083
x_forwarded: true
resources:
- names: [client, federation]

View File

@@ -8,7 +8,6 @@ worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8085
x_forwarded: true
resources:
- names: [media]

View File

@@ -88,39 +88,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.76.0
## Faster joins are enabled by default
When joining a room for the first time, Synapse 1.76.0 will request a partial join from the other server by default. Previously, server admins had to opt-in to this using an experimental config flag.
Server admins can opt out of this feature for the time being by setting
```yaml
experimental_features:
faster_joins: false
```
in their server config.
## Changes to the account data replication streams
Synapse has changed the format of the account data and devices replication
streams (between workers). This is a forwards- and backwards-incompatible
change: v1.75 workers cannot process account data replicated by v1.76 workers,
and vice versa.
Once all workers are upgraded to v1.76 (or downgraded to v1.75), account data
and device replication will resume as normal.
## Minimum version of Poetry is now 1.3.2
The minimum supported version of Poetry is now 1.3.2 (previously 1.2.0, [since
Synapse 1.67](#upgrading-to-v1670)). If you have used `poetry install` to
install Synapse from a source checkout, you should upgrade poetry: see its
[installation instructions](https://python-poetry.org/docs/#installation).
For all other installation methods, no action is required.
# Upgrading to v1.74.0
## Unicode support in user search

View File

@@ -2,19 +2,13 @@
How do I become a server admin?
---
If your server already has an admin account you should use the
[User Admin API](../../admin_api/user_admin_api.md#change-whether-a-user-is-a-server-administrator-or-not)
to promote other accounts to become admins.
If your server already has an admin account you should use the [User Admin API](../../admin_api/user_admin_api.md#change-whether-a-user-is-a-server-administrator-or-not) to promote other accounts to become admins.
If you don't have any admin accounts yet you won't be able to use the admin API,
so you'll have to edit the database manually. Manually editing the database is
generally not recommended so once you have an admin account: use the admin APIs
to make further changes.
If you don't have any admin accounts yet you won't be able to use the admin API, so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account: use the admin APIs to make further changes.
```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
```
What servers are my server talking to?
---
Run this sql query on your db:
@@ -38,44 +32,6 @@ What users are registered on my server?
SELECT NAME from users;
```
How can I export user data?
---
Synapse includes a Python command to export data for a specific user. It takes the homeserver
configuration file and the full Matrix ID of the user to export:
```console
python -m synapse.app.admin_cmd -c <config_file> export-data <user_id> --output-directory <directory_path>
```
If you use [Poetry](../../development/dependencies.md#managing-dependencies-with-poetry)
to run Synapse:
```console
poetry run python -m synapse.app.admin_cmd -c <config_file> export-data <user_id> --output-directory <directory_path>
```
The directory to store the export data in can be customised with the
`--output-directory` parameter; ensure that the provided directory is
empty. If this parameter is not provided, Synapse defaults to creating
a temporary directory (which starts with "synapse-exfiltrate") in `/tmp`,
`/var/tmp`, or `/usr/tmp`, in that order.
The exported data has the following layout:
```
output-directory
├───rooms
│ └───<room_id>
│ ├───events
│ ├───state
│ ├───invite_state
│ └───knock_state
└───user_data
├───connections
├───devices
└───profile
```
Manually resetting passwords
---
Users can reset their password through their client. Alternatively, a server admin
@@ -86,29 +42,21 @@ I have a problem with my server. Can I just delete my database and start again?
---
Deleting your database is unlikely to make anything better.
It's easy to make the mistake of thinking that you can start again from a clean
slate by dropping your database, but things don't work like that in a federated
network: lots of other servers have information about your server.
It's easy to make the mistake of thinking that you can start again from a clean slate by dropping your database, but things don't work like that in a federated network: lots of other servers have information about your server.
For example: other servers might think that you are in a room, your server will
think that you are not, and you'll probably be unable to interact with that room
in a sensible way ever again.
For example: other servers might think that you are in a room, your server will think that you are not, and you'll probably be unable to interact with that room in a sensible way ever again.
In general, there are better solutions to any problem than dropping the database.
Come and seek help in https://matrix.to/#/#synapse:matrix.org.
In general, there are better solutions to any problem than dropping the database. Come and seek help in https://matrix.to/#/#synapse:matrix.org.
There are two exceptions when it might be sensible to delete your database and start again:
* You have *never* joined any rooms which are federated with other servers. For
instance, a local deployment which the outside world can't talk to.
* You are changing the `server_name` in the homeserver configuration. In effect
this makes your server a completely new one from the point of view of the network,
so in this case it makes sense to start with a clean database.
* You have *never* joined any rooms which are federated with other servers. For instance, a local deployment which the outside world can't talk to.
* You are changing the `server_name` in the homeserver configuration. In effect this makes your server a completely new one from the point of view of the network, so in this case it makes sense to start with a clean database.
(In both cases you probably also want to clear out the media_store.)
I've stuffed up access to my room, how can I delete it to free up the alias?
---
Using the following curl command:
```console
```
curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias>
```
`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is:
@@ -119,25 +67,19 @@ Access Token:\<click to reveal\>
How can I find the lines corresponding to a given HTTP request in my homeserver log?
---
Synapse tags each log line according to the HTTP request it is processing. When
it finishes processing each request, it logs a line containing the words
`Processed request: `. For example:
Synapse tags each log line according to the HTTP request it is processing. When it finishes processing each request, it logs a line containing the words `Processed request: `. For example:
```
2019-02-14 22:35:08,196 - synapse.access.http.8008 - 302 - INFO - GET-37 - ::1 - 8008 - {@richvdh:localhost} Processed request: 0.173sec/0.001sec (0.002sec, 0.000sec) (0.027sec/0.026sec/2) 687B 200 "GET /_matrix/client/r0/sync HTTP/1.1" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36" [0 dbevts]"
```
Here we can see that the request has been tagged with `GET-37`. (The tag depends
on the method of the HTTP request, so might start with `GET-`, `PUT-`, `POST-`,
`OPTIONS-` or `DELETE-`.) So to find all lines corresponding to this request, we can do:
Here we can see that the request has been tagged with `GET-37`. (The tag depends on the method of the HTTP request, so might start with `GET-`, `PUT-`, `POST-`, `OPTIONS-` or `DELETE-`.) So to find all lines corresponding to this request, we can do:
```console
```
grep 'GET-37' homeserver.log
```
If you want to paste that output into a github issue or matrix room, please
remember to surround it with triple-backticks (```) to make it legible
(see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).
If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).
What do all those fields in the 'Processed' line mean?
@@ -177,7 +119,7 @@ This is normally caused by a misconfiguration in your reverse-proxy. See [the re
Help!! Synapse is slow and eats all my RAM/CPU!
---
-----------------------------------------------
First, ensure you are running the latest version of Synapse, using Python 3
with a [PostgreSQL database](../../postgres.md).
@@ -219,7 +161,7 @@ in the Synapse config file: [see here](../configuration/config_documentation.md#
Running out of File Handles
---
---------------------------
If Synapse runs out of file handles, it typically fails badly - live-locking
at 100% CPU, and/or failing to accept new TCP connections (blocking the

View File

@@ -295,9 +295,7 @@ Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#comp
For example, for room version 1, `default_room_version` should be set
to "1".
Currently defaults to ["10"](https://spec.matrix.org/v1.5/rooms/v10/).
_Changed in Synapse 1.76:_ the default room version was increased from [9](https://spec.matrix.org/v1.5/rooms/v9/) to [10](https://spec.matrix.org/v1.5/rooms/v10/).
Currently defaults to "9".
Example configuration:
```yaml
@@ -3474,8 +3472,8 @@ This setting defines options related to the user directory.
This option has the following sub-options:
* `enabled`: Defines whether users can search the user directory. If false then
empty responses are returned to all queries. Defaults to true.
* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver.
If false, search results will only contain users
* `search_all_users`: Defines whether to search all users visible to your HS when searching
the user directory. If false, search results will only contain users
visible in public rooms and users sharing a room with the requester.
Defaults to false.
@@ -4031,27 +4029,6 @@ worker_listeners:
resources:
- names: [client, federation]
```
---
### `worker_manhole`
A worker may have a listener for [`manhole`](../../manhole.md).
It allows server administrators to access a Python shell on the worker.
Example configuration:
```yaml
worker_manhole: 9000
```
This is a short form for:
```yaml
worker_listeners:
- port: 9000
bind_addresses: ['127.0.0.1']
type: manhole
```
It also needs an additional [`manhole_settings`](#manhole_settings) configuration.
---
### `worker_daemonize`

View File

@@ -1,11 +1,9 @@
# Logging Sample Configuration File
Below is a sample logging configuration file. This file can be tweaked to control how your
homeserver will output logs. The value of the `log_config` option in your homeserver config
should be the path to this file.
To apply changes made to this file, send Synapse a SIGHUP signal (or, if using `systemd`, run
`systemctl reload` on the Synapse service).
homeserver will output logs. A restart of the server is generally required to apply any
changes made to this file. The value of the `log_config` option in your homeserver
config should be the path to this file.
Note that a default logging configuration (shown below) is created automatically alongside
the homeserver config when following the [installation instructions](../../setup/installation.md).

View File

@@ -32,9 +32,28 @@ exclude = (?x)
|synapse/storage/databases/main/cache.py
|synapse/storage/schema/
|tests/api/test_auth.py
|tests/api/test_ratelimiting.py
|tests/app/test_openid_listener.py
|tests/appservice/test_scheduler.py
|tests/events/test_presence_router.py
|tests/events/test_utils.py
|tests/federation/test_federation_catch_up.py
|tests/federation/test_federation_sender.py
|tests/federation/transport/test_knocking.py
|tests/handlers/test_typing.py
|tests/http/federation/test_matrix_federation_agent.py
|tests/http/federation/test_srv_resolver.py
|tests/http/test_proxyagent.py
|tests/logging/__init__.py
|tests/logging/test_terse_json.py
|tests/module_api/test_api.py
|tests/rest/client/test_transactions.py
|tests/rest/media/v1/test_media_storage.py
|tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py
|tests/test_state.py
|tests/test_terms_auth.py
)$
[mypy-synapse.federation.transport.client]
@@ -64,45 +83,24 @@ disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False
[mypy-tests.api.*]
disallow_untyped_defs = True
[mypy-tests.app.*]
disallow_untyped_defs = True
[mypy-tests.appservice.*]
disallow_untyped_defs = True
[mypy-tests.config.*]
disallow_untyped_defs = True
[mypy-tests.crypto.*]
disallow_untyped_defs = True
[mypy-tests.events.*]
disallow_untyped_defs = True
[mypy-tests.federation.*]
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True
[mypy-tests.handlers.*]
disallow_untyped_defs = True
[mypy-tests.http.*]
disallow_untyped_defs = True
[mypy-tests.logging.*]
disallow_untyped_defs = True
[mypy-tests.metrics.*]
disallow_untyped_defs = True
[mypy-tests.push.*]
disallow_untyped_defs = True
[mypy-tests.replication.*]
disallow_untyped_defs = True
[mypy-tests.rest.*]
disallow_untyped_defs = True
@@ -115,12 +113,6 @@ disallow_untyped_defs = True
[mypy-tests.test_server]
disallow_untyped_defs = True
[mypy-tests.test_state]
disallow_untyped_defs = True
[mypy-tests.test_terms_auth]
disallow_untyped_defs = True
[mypy-tests.types.*]
disallow_untyped_defs = True
@@ -147,6 +139,9 @@ disallow_untyped_defs = True
[mypy-authlib.*]
ignore_missing_imports = True
[mypy-canonicaljson]
ignore_missing_imports = True
[mypy-ijson.*]
ignore_missing_imports = True

poetry.lock (generated; 3678 lines changed): file diff suppressed because it is too large.

View File

@@ -48,6 +48,11 @@ line-length = 88
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
#
# See https://github.com/charliermarsh/ruff/#pyflakes
# F401: unused import
# F811: Redefinition of unused
# F821: Undefined name
#
# flake8-bugbear compatible checks. Its error codes are described at
# https://github.com/charliermarsh/ruff/#flake8-bugbear
# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
@@ -59,6 +64,9 @@ ignore = [
"B024",
"E501",
"E731",
"F401",
"F811",
"F821",
]
select = [
# pycodestyle checks.
@@ -89,7 +97,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.77.0"
version = "1.75.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -127,9 +135,7 @@ exclude = [
{ path = "synapse/*.so", format = "sdist"}
]
[tool.poetry.build]
script = "build_rust.py"
generate-setup-file = true
build = "build_rust.py"
[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
@@ -311,7 +317,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.230"
ruff = "0.0.215"
# Typechecking
mypy = "*"
@@ -352,7 +358,7 @@ towncrier = ">=18.6.0rc1"
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"]
requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"

View File

@@ -23,17 +23,13 @@ name = "synapse.synapse_rust"
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] }
pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.7.0"
pythonize = "0.17.0"
regex = "1.6.0"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
[features]
extension-module = ["pyo3/extension-module"]
default = ["extension-module"]
[build-dependencies]
blake2 = "0.10.4"
hex = "0.4.3"

View File

@@ -13,7 +13,6 @@
// limitations under the License.
#![feature(test)]
use std::collections::BTreeSet;
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules,
};
@@ -33,9 +32,6 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@@ -72,9 +68,6 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@@ -111,9 +104,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@@ -150,9 +140,6 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
Default::default(),
@@ -163,15 +150,8 @@ fn bench_eval_message(b: &mut Bencher) {
)
.unwrap();
let rules = FilteredPushRules::py_new(
PushRules::new(Vec::new()),
Default::default(),
false,
false,
false,
false,
false,
);
let rules =
FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false, false);
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
}

View File

@@ -1,13 +1,7 @@
use lazy_static::lazy_static;
use pyo3::prelude::*;
use pyo3_log::ResetHandle;
pub mod push;
lazy_static! {
static ref LOGGING_HANDLE: ResetHandle = pyo3_log::init();
}
/// Returns the hash of all the rust source files at the time it was compiled.
///
/// Used by python to detect if the rust library is outdated.
@@ -23,20 +17,13 @@ fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
Ok((a + b).to_string())
}
/// Reset the cached logging configuration of pyo3-log to pick up any changes
/// in the Python logging configuration.
///
#[pyfunction]
fn reset_logging_config() {
LOGGING_HANDLE.reset();
}
/// The entry point for defining the Python module.
#[pymodule]
fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
pyo3_log::init();
m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
m.add_function(wrap_pyfunction!(reset_logging_config, m)?)?;
push::register_module(py, m)?;

View File

@@ -1,4 +1,4 @@
// Copyright 2022, 2023 The Matrix.org Foundation C.I.C.
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -63,23 +63,6 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
}];
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
// We don't want to notify on edits. Not only can this be confusing in real
// time (2 notifications, one message) but it's especially confusing
// if a bridge needs to edit a previously backfilled message.
PushRule {
rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.m.relates_to.rel_type"),
pattern: Some(Cow::Borrowed("m.replace")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
priority_class: 5,
@@ -148,14 +131,6 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
priority_class: 5,
@@ -164,19 +139,6 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::IsRoomMention),
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"),
priority_class: 5,
@@ -246,20 +208,6 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
];
pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
@@ -648,68 +596,6 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_start_one_to_one"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_start"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_end_one_to_one"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_end"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),
default: true,
default_enabled: true,
},
];
lazy_static! {

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap, BTreeSet};
use std::collections::BTreeMap;
use anyhow::{Context, Error};
use lazy_static::lazy_static;
@@ -68,13 +68,6 @@ pub struct PushRuleEvaluator {
/// The "content.body", if any.
body: String,
/// True if the event has a mentions property and MSC3952 support is enabled.
has_mentions: bool,
/// The user mentions that were part of the message.
user_mentions: BTreeSet<String>,
/// True if the message is a room mention.
room_mention: bool,
/// The number of users in the room.
room_member_count: u64,
@@ -107,9 +100,6 @@ impl PushRuleEvaluator {
#[new]
pub fn py_new(
flattened_keys: BTreeMap<String, String>,
has_mentions: bool,
user_mentions: BTreeSet<String>,
room_mention: bool,
room_member_count: u64,
sender_power_level: Option<i64>,
notification_power_levels: BTreeMap<String, i64>,
@@ -126,9 +116,6 @@ impl PushRuleEvaluator {
Ok(PushRuleEvaluator {
flattened_keys,
body,
has_mentions,
user_mentions,
room_mention,
room_member_count,
notification_power_levels,
sender_power_level,
@@ -159,19 +146,6 @@ impl PushRuleEvaluator {
}
let rule_id = &push_rule.rule_id().to_string();
// For backwards-compatibility the legacy mention rules are disabled
// if the event contains the 'm.mentions' property (and if the
// experimental feature is enabled, both of these are represented
// by the has_mentions flag).
if self.has_mentions
&& (rule_id == "global/override/.m.rule.contains_display_name"
|| rule_id == "global/content/.m.rule.contains_user_name"
|| rule_id == "global/override/.m.rule.roomnotif")
{
continue;
}
let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
@@ -255,14 +229,6 @@ impl PushRuleEvaluator {
KnownCondition::RelatedEventMatch(event_match) => {
self.match_related_event_match(event_match, user_id)?
}
KnownCondition::IsUserMention => {
if let Some(uid) = user_id {
self.user_mentions.contains(uid)
} else {
false
}
}
KnownCondition::IsRoomMention => self.room_mention,
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
if !dn.is_empty() {
@@ -458,9 +424,6 @@ fn push_rule_evaluator() {
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
BTreeMap::new(),
@@ -486,9 +449,6 @@ fn test_requires_room_version_supports_condition() {
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
BTreeSet::new(),
false,
10,
Some(0),
BTreeMap::new(),
@@ -523,7 +483,7 @@ fn test_requires_room_version_supports_condition() {
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
None,
None,
);

View File

@@ -269,10 +269,6 @@ pub enum KnownCondition {
EventMatch(EventMatchCondition),
#[serde(rename = "im.nheko.msc3664.related_event_match")]
RelatedEventMatch(RelatedEventMatchCondition),
#[serde(rename = "org.matrix.msc3952.is_user_mention")]
IsUserMention,
#[serde(rename = "org.matrix.msc3952.is_room_mention")]
IsRoomMention,
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@@ -415,11 +411,8 @@ impl PushRules {
pub struct FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
msc1767_enabled: bool,
}
#[pymethods]
@@ -428,20 +421,14 @@ impl FilteredPushRules {
pub fn py_new(
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
msc1767_enabled: bool,
) -> Self {
Self {
push_rules,
enabled_map,
msc1767_enabled,
msc3381_polls_enabled,
msc3664_enabled,
msc3952_intentional_mentions,
msc3958_suppress_edits_enabled,
msc1767_enabled,
}
}
@@ -460,28 +447,13 @@ impl FilteredPushRules {
.iter()
.filter(|rule| {
// Ignore disabled experimental push rules
if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
return false;
}
if !self.msc3664_enabled
&& rule.rule_id == "global/override/.im.nheko.msc3664.reply"
{
return false;
}
if !self.msc3381_polls_enabled && rule.rule_id.contains("org.matrix.msc3930") {
return false;
}
if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952")
{
return false;
}
if !self.msc3958_suppress_edits_enabled
&& rule.rule_id == "global/override/.com.beeper.suppress_edits"
{
if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
return false;
}
@@ -542,28 +514,6 @@ fn test_deserialize_unstable_msc3931_condition() {
));
}
#[test]
fn test_deserialize_unstable_msc3952_user_condition() {
let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::IsUserMention)
));
}
#[test]
fn test_deserialize_unstable_msc3952_room_condition() {
let json = r#"{"kind":"org.matrix.msc3952.is_room_mention"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::IsRoomMention)
));
}
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;

View File

@@ -190,7 +190,7 @@ fi
extra_test_args=()
test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_tags="synapse_blacklist,msc3787,msc3874,msc3391"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -223,14 +223,12 @@ else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
# The tests for importing historical messages (MSC2716)
# only pass with monoliths, currently.
test_tags="$test_tags,msc2716"
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
# Enable the Twisted asyncio reactor
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
# We only test faster room joins on monoliths, because they are purposefully
# being developed without worker support to start with.
#
# The tests for importing historical messages (MSC2716) also only pass with monoliths,
# currently.
test_tags="$test_tags,faster_joins,msc2716"
fi

View File

@@ -11,5 +11,6 @@
sqlite3 "$1" <<'EOF' >table-save.sql
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF

View File

@@ -101,43 +101,10 @@ echo
# Print out the commands being run
set -x
# Ensure the sort order of imports.
isort "${files[@]}"
# Ensure Python code conforms to an opinionated style.
python3 -m black "${files[@]}"
# Ensure the sample configuration file conforms to style checks.
./scripts-dev/config-lint.sh
# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
ruff --quiet "${files[@]}"
# Catch any common programming mistakes in Rust code.
#
# --bins, --examples, --lib, --tests combined explicitly disable checking
# the benchmarks, which can fail due to `#![feature]` macros not being
# allowed on the stable rust toolchain (rustc error E0554).
#
# --allow-staged and --allow-dirty suppress clippy raising errors
# for uncommitted files. Only needed when using --fix.
#
# -D warnings disables the "warnings" lint.
#
# Using --fix has a tendency to cause subsequent runs of clippy to recompile
# rust code, which can slow down this script. Thus we run clippy without --fix
# first which is quick, and then re-run it with --fix if an error was found.
if ! cargo-clippy --bins --examples --lib --tests -- -D warnings > /dev/null 2>&1; then
cargo-clippy \
--bins --examples --lib --tests --allow-staged --allow-dirty --fix -- -D warnings
fi
# Ensure the formatting of Rust code.
cargo-fmt
# Ensure all Pydantic models use strict types.
./scripts-dev/check_pydantic_models.py lint
# Ensure type hints are correct.
mypy

View File

@@ -438,7 +438,7 @@ def _upload(gh_token: Optional[str]) -> None:
repo = get_repo_and_check_clean_checkout()
tag = repo.tag(f"refs/tags/{tag_name}")
if repo.head.commit != tag.commit:
click.echo(f"Tag {tag_name} ({tag.commit}) is not currently checked out!")
click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
click.get_current_context().abort()
# Query all the assets corresponding to this release.

View File

@@ -7,6 +7,7 @@ from __future__ import annotations
from typing import (
Any,
Callable,
Generic,
Iterable,
Iterator,
List,

View File

@@ -5,8 +5,10 @@
from __future__ import annotations
from typing import (
AbstractSet,
Any,
Callable,
Generic,
Hashable,
Iterable,
Iterator,

View File

@@ -1,3 +1,2 @@
def sum_as_string(a: int, b: int) -> str: ...
def get_rust_file_digest() -> str: ...
def reset_logging_config() -> None: ...

View File

@@ -1,17 +1,3 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from synapse.types import JsonDict
@@ -43,11 +29,8 @@ class FilteredPushRules:
self,
push_rules: PushRules,
enabled_map: Dict[str, bool],
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
msc1767_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@@ -57,9 +40,6 @@ class PushRuleEvaluator:
def __init__(
self,
flattened_keys: Mapping[str, str],
has_mentions: bool,
user_mentions: Set[str],
room_mention: bool,
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],

View File

@@ -51,7 +51,6 @@ from synapse.logging.context import (
make_deferred_yieldable,
run_in_background,
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -261,9 +260,6 @@ class MockHomeserver:
def should_send_federation(self) -> bool:
return False
def get_replication_notifier(self) -> ReplicationNotifier:
return ReplicationNotifier()
class Porter:
def __init__(

View File

@@ -17,8 +17,6 @@
"""Contains constants from the specification."""
import enum
from typing_extensions import Final
# the max size of a (canonical-json-encoded) event
@@ -233,9 +231,6 @@ class EventContentFields:
# The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server"
# Use for mentioning users.
MSC3952_MENTIONS: Final = "org.matrix.msc3952.mentions"
# an unspecced field added to to-device messages to identify them uniquely-ish
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
@@ -254,7 +249,6 @@ class RoomEncryptionAlgorithms:
class AccountDataTypes:
DIRECT: Final = "m.direct"
IGNORED_USER_LIST: Final = "m.ignored_user_list"
TAG: Final = "m.tag"
class HistoryVisibility:
@@ -295,8 +289,3 @@ class ApprovalNoticeMedium:
NONE = "org.matrix.msc3866.none"
EMAIL = "org.matrix.msc3866.email"
class Direction(enum.Enum):
BACKWARDS = "b"
FORWARDS = "f"

View File

@@ -252,9 +252,9 @@ class FilterCollection:
return self._room_timeline_filter.unread_thread_notifications
async def filter_presence(
self, presence_states: Iterable[UserPresenceState]
self, events: Iterable[UserPresenceState]
) -> List[UserPresenceState]:
return await self._presence_filter.filter(presence_states)
return await self._presence_filter.filter(events)
async def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return await self._account_data.filter(events)

View File

@@ -35,7 +35,6 @@ from synapse.storage.databases.main.appservice import (
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
@@ -44,7 +43,6 @@ from synapse.storage.databases.main.event_push_actions import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
@@ -56,7 +54,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import JsonDict, StateMap
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -65,7 +63,6 @@ logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
FilteringWorkerStore,
ClientIpWorkerStore,
DeviceWorkerStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
@@ -85,7 +82,6 @@ class AdminCmdSlavedStore(
EventsWorkerStore,
RegistrationWorkerStore,
RoomWorkerStore,
ProfileWorkerStore,
):
def __init__(
self,
@@ -196,32 +192,6 @@ class FileExfiltrationWriter(ExfiltrationWriter):
for event in state.values():
print(json.dumps(event), file=f)
def write_profile(self, profile: JsonDict) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
os.makedirs(user_directory, exist_ok=True)
profile_file = os.path.join(user_directory, "profile")
with open(profile_file, "a") as f:
print(json.dumps(profile), file=f)
def write_devices(self, devices: List[JsonDict]) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
os.makedirs(user_directory, exist_ok=True)
device_file = os.path.join(user_directory, "devices")
for device in devices:
with open(device_file, "a") as f:
print(json.dumps(device), file=f)
def write_connections(self, connections: List[JsonDict]) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
os.makedirs(user_directory, exist_ok=True)
connection_file = os.path.join(user_directory, "connections")
for connection in connections:
with open(connection_file, "a") as f:
print(json.dumps(connection), file=f)
def finished(self) -> str:
return self.base_directory
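All three removed writer methods follow the same append-one-JSON-document-per-line pattern. A self-contained sketch of that pattern (the path and payload here are illustrative only):

import json
import os

def append_json_line(directory: str, filename: str, obj: dict) -> None:
    # Create the directory on first use, then append one JSON document
    # per line, matching FileExfiltrationWriter's on-disk layout.
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, filename), "a") as f:
        print(json.dumps(obj), file=f)

append_json_line("/tmp/export_data/user_data", "devices", {"device_id": "ABCDEF"})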

View File

@@ -110,8 +110,6 @@ def _worker_entrypoint(
and then kick off the worker's main() function.
"""
from synapse.util.stringutils import strtobool
sys.argv = args
# reset the custom signal handlers that we installed, so that the children start
@@ -119,24 +117,9 @@ def _worker_entrypoint(
for sig, handler in _original_signal_handlers.items():
signal.signal(sig, handler)
# Install the asyncio reactor if the
# SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR is set to 1. The
# SYNAPSE_ASYNC_IO_REACTOR variable would be used, but then causes
# synapse/__init__.py to also try to install an asyncio reactor.
if strtobool(
os.environ.get("SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR", "0")
):
import asyncio
from twisted.internet.asyncioreactor import AsyncioSelectorReactor
reactor = AsyncioSelectorReactor(asyncio.get_event_loop())
proxy_reactor._install_real_reactor(reactor)
else:
from twisted.internet.epollreactor import EPollReactor
proxy_reactor._install_real_reactor(EPollReactor())
from twisted.internet.epollreactor import EPollReactor
proxy_reactor._install_real_reactor(EPollReactor())
func()
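For context, installing Twisted's asyncio reactor outside of the proxy-reactor machinery looks roughly like this. A sketch only: it assumes nothing in the process has imported `twisted.internet.reactor` yet, since a reactor can only be installed once:

import asyncio

from twisted.internet import asyncioreactor

# Must run before anything imports twisted.internet.reactor, otherwise
# installation raises ReactorAlreadyInstalledError.
loop = asyncio.new_event_loop()
asyncioreactor.install(loop)

from twisted.internet import reactor  # now an AsyncioSelectorReactor
print(type(reactor).__name__)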

View File

@@ -282,6 +282,13 @@ def start(config_options: List[str]) -> None:
"synapse.app.user_dir",
)
if config.experimental.faster_joins_enabled:
raise ConfigError(
"You have enabled the experimental `faster_joins` config option, but it is "
"not compatible with worker deployments yet. Please disable `faster_joins` "
"or run Synapse as a single process deployment instead."
)
synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

View File

@@ -16,7 +16,7 @@
import logging
import re
from enum import Enum
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern, Sequence
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern
import attr
from netaddr import IPSet
@@ -377,7 +377,7 @@ class AppServiceTransaction:
self,
service: ApplicationService,
id: int,
events: Sequence[EventBase],
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
one_time_keys_count: TransactionOneTimeKeysCount,

View File

@@ -14,17 +14,7 @@
# limitations under the License.
import logging
import urllib.parse
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
)
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
from prometheus_client import Counter
from typing_extensions import TypeGuard
@@ -269,7 +259,7 @@ class ApplicationServiceApi(SimpleHttpClient):
async def push_bulk(
self,
service: "ApplicationService",
events: Sequence[EventBase],
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
one_time_keys_count: TransactionOneTimeKeysCount,

View File

@@ -57,7 +57,6 @@ from typing import (
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
)
@@ -365,7 +364,7 @@ class _TransactionController:
async def send(
self,
service: ApplicationService,
events: Sequence[EventBase],
events: List[EventBase],
ephemeral: Optional[List[JsonDict]] = None,
to_device_messages: Optional[List[JsonDict]] = None,
one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,

View File

@@ -174,29 +174,15 @@ class Config:
@staticmethod
def parse_size(value: Union[str, int]) -> int:
"""Interpret `value` as a number of bytes.
If an integer is provided it is treated as bytes and is unchanged.
String byte sizes can have a suffix of 'K' or 'M', representing kibibytes and
mebibytes respectively. No suffix is understood as a plain byte count.
Raises:
TypeError: if given something other than an integer or a string
ValueError: if given a string not of the form described above.
"""
if type(value) is int:
if isinstance(value, int):
return value
elif type(value) is str:
sizes = {"K": 1024, "M": 1024 * 1024}
size = 1
suffix = value[-1]
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
return int(value) * size
else:
raise TypeError(f"Bad byte size {value!r}")
sizes = {"K": 1024, "M": 1024 * 1024}
size = 1
suffix = value[-1]
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
return int(value) * size
@staticmethod
def parse_duration(value: Union[str, int]) -> int:
@@ -212,36 +198,22 @@ class Config:
Returns:
The number of milliseconds in the duration.
Raises:
TypeError: if given something other than an integer or a string
ValueError: if given a string not of the form described above.
"""
if type(value) is int:
if isinstance(value, int):
return value
elif type(value) is str:
second = 1000
minute = 60 * second
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
sizes = {
"s": second,
"m": minute,
"h": hour,
"d": day,
"w": week,
"y": year,
}
size = 1
suffix = value[-1]
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
return int(value) * size
else:
raise TypeError(f"Bad duration {value!r}")
second = 1000
minute = 60 * second
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
size = 1
suffix = value[-1]
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
return int(value) * size
@staticmethod
def abspath(file_path: str) -> str:
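A standalone sketch of the suffix-parsing scheme shared by `parse_size` and `parse_duration` above, using the byte-size multipliers:

def parse_size(value):
    # Integers pass through unchanged; strings may carry a K/M suffix.
    if type(value) is int:
        return value
    sizes = {"K": 1024, "M": 1024 * 1024}
    size = 1
    suffix = value[-1]
    if suffix in sizes:
        value = value[:-1]
        size = sizes[suffix]
    return int(value) * size

assert parse_size("10K") == 10240
assert parse_size("5M") == 5 * 1024 * 1024
assert parse_size(100) == 100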

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
import argparse
from typing import (
Any,
@@ -18,7 +20,7 @@ from typing import (
import jinja2
from synapse.config import ( # noqa: F401
from synapse.config import (
account_validity,
api,
appservice,
@@ -167,7 +169,7 @@ class RootConfig:
self, section_name: Literal["caches"]
) -> cache.CacheConfig: ...
@overload
def reload_config_section(self, section_name: str) -> "Config": ...
def reload_config_section(self, section_name: str) -> Config: ...
class Config:
root: RootConfig
@@ -200,9 +202,9 @@ def find_config_files(search_paths: List[str]) -> List[str]: ...
class ShardedWorkerHandlingConfig:
instances: List[str]
def __init__(self, instances: List[str]) -> None: ...
def should_handle(self, instance_name: str, key: str) -> bool: ... # noqa: F811
def should_handle(self, instance_name: str, key: str) -> bool: ...
class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
def get_instance(self, key: str) -> str: ... # noqa: F811
def get_instance(self, key: str) -> str: ...
def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...

View File

@@ -126,7 +126,7 @@ class CacheConfig(Config):
cache_config = config.get("caches") or {}
self.global_factor = cache_config.get("global_factor", _DEFAULT_FACTOR_SIZE)
if type(self.global_factor) not in (int, float):
if not isinstance(self.global_factor, (int, float)):
raise ConfigError("caches.global_factor must be a number.")
# Load cache factors from the config
@@ -151,7 +151,7 @@ class CacheConfig(Config):
)
for cache, factor in individual_factors.items():
if type(factor) not in (int, float):
if not isinstance(factor, (int, float)):
raise ConfigError(
"caches.per_cache_factors.%s must be a number" % (cache,)
)
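The exact-type check being swapped out here, `type(x) not in (int, float)`, is deliberately stricter than `isinstance`, because `bool` is a subclass of `int`, so the two forms disagree on boolean values:

# bool subclasses int, so isinstance() accepts True/False where an
# exact type() check rejects them (e.g. a stray `global_factor: true`).
x = True
assert isinstance(x, (int, float))
assert type(x) not in (int, float)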

View File

@@ -17,7 +17,6 @@ from typing import Any, Optional
import attr
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config import ConfigError
from synapse.config._base import Config
from synapse.types import JsonDict
@@ -75,16 +74,12 @@ class ExperimentalConfig(Config):
)
# MSC3706 (server-side support for partial state in /send_join responses)
# Synapse will always serve partial state responses to requests using the stable
# query parameter `omit_members`. If this flag is set, Synapse will also serve
# partial state responses to requests using the unstable query parameter
# `org.matrix.msc3706.partial_state`.
self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)
# experimental support for faster joins over federation
# (MSC2775, MSC3706, MSC3895)
# requires a target server that can provide a partial join response (MSC3706)
self.faster_joins_enabled: bool = experimental.get("faster_joins", True)
# requires a target server with msc3706_enabled enabled.
self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
# MSC3720 (Account status endpoint)
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
@@ -98,9 +93,6 @@ class ExperimentalConfig(Config):
# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
# MSC3773: Thread notifications
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
@@ -135,24 +127,6 @@ class ExperimentalConfig(Config):
"msc3886_endpoint", None
)
# MSC3890: Remotely silence local notifications
# Note: This option requires "experimental_features.msc3391_enabled" to be
# set to "true", in order to communicate account data deletions to clients.
self.msc3890_enabled: bool = experimental.get("msc3890_enabled", False)
if self.msc3890_enabled and not self.msc3391_enabled:
raise ConfigError(
"Option 'experimental_features.msc3391' must be set to 'true' to "
"enable 'experimental_features.msc3890'. MSC3391 functionality is "
"required to communicate account data deletions to clients."
)
# MSC3381: Polls.
# In practice, supporting polls in Synapse only requires an implementation of
# MSC3930: Push rules for MSC3381 polls, which is what this option enables.
self.msc3381_polls_enabled: bool = experimental.get(
"msc3381_polls_enabled", False
)
# MSC3912: Relation-based redactions.
self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
@@ -168,13 +142,3 @@ class ExperimentalConfig(Config):
# MSC3925: do not replace events with their edits
self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False)
# MSC3952: Intentional mentions
self.msc3952_intentional_mentions = experimental.get(
"msc3952_intentional_mentions", False
)
# MSC3958: Do not generate notifications for edits.
self.msc3958_supress_edit_notifs = experimental.get(
"msc3958_supress_edit_notifs", False
)

View File

@@ -34,7 +34,6 @@ from twisted.logger import (
from synapse.logging.context import LoggingContextFilter
from synapse.logging.filter import MetadataFilter
from synapse.synapse_rust import reset_logging_config
from synapse.types import JsonDict
from ..util import SYNAPSE_VERSION
@@ -201,26 +200,6 @@ def _setup_stdlib_logging(
"""
Set up Python standard library logging.
"""
# We add a log record factory that runs all messages through the
# LoggingContextFilter so that we get the context *at the time we log*
# rather than when we write to a handler. This can be done in config using
# filter options, but care must be taken when using e.g. MemoryHandler to buffer
# writes.
log_context_filter = LoggingContextFilter()
log_metadata_filter = MetadataFilter({"server_name": config.server.server_name})
old_factory = logging.getLogRecordFactory()
def factory(*args: Any, **kwargs: Any) -> logging.LogRecord:
record = old_factory(*args, **kwargs)
log_context_filter.filter(record)
log_metadata_filter.filter(record)
return record
logging.setLogRecordFactory(factory)
# Configure the logger with the initial configuration.
if log_config_path is None:
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
@@ -240,6 +219,24 @@ def _setup_stdlib_logging(
# Load the logging configuration.
_load_logging_config(log_config_path)
# We add a log record factory that runs all messages through the
# LoggingContextFilter so that we get the context *at the time we log*
# rather than when we write to a handler. This can be done in config using
# filter options, but care must be taken when using e.g. MemoryHandler to buffer
# writes.
log_context_filter = LoggingContextFilter()
log_metadata_filter = MetadataFilter({"server_name": config.server.server_name})
old_factory = logging.getLogRecordFactory()
def factory(*args: Any, **kwargs: Any) -> logging.LogRecord:
record = old_factory(*args, **kwargs)
log_context_filter.filter(record)
log_metadata_filter.filter(record)
return record
logging.setLogRecordFactory(factory)
# Route Twisted's native logging through to the standard library logging
# system.
observer = STDLibLogObserver()
@@ -297,9 +294,6 @@ def _load_logging_config(log_config_path: str) -> None:
logging.config.dictConfig(log_config)
# Blow away the pyo3-log cache so that it reloads the configuration.
reset_logging_config()
def _reload_logging_config(log_config_path: Optional[str]) -> None:
"""

View File

@@ -151,7 +151,7 @@ DEFAULT_IP_RANGE_BLACKLIST = [
"fec0::/10",
]
DEFAULT_ROOM_VERSION = "10"
DEFAULT_ROOM_VERSION = "9"
ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
@@ -904,7 +904,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))
port = listener.get("port")
if type(port) is not int:
if not isinstance(port, int):
raise ConfigError("Listener configuration is lacking a valid 'port' option")
tls = listener.get("tls", False)

View File

@@ -154,21 +154,17 @@ class Keyring:
if key_fetchers is None:
key_fetchers = (
# Fetch keys from the database.
StoreKeyFetcher(hs),
# Fetch keys from a configured Perspectives server.
PerspectivesKeyFetcher(hs),
# Fetch keys from the origin server directly.
ServerKeyFetcher(hs),
)
self._key_fetchers = key_fetchers
self._fetch_keys_queue: BatchingQueue[
self._server_queue: BatchingQueue[
_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
] = BatchingQueue(
"keyring_server",
clock=hs.get_clock(),
# The method called to fetch each key
process_batch_callback=self._inner_fetch_key_requests,
)
@@ -291,7 +287,7 @@ class Keyring:
minimum_valid_until_ts=verify_request.minimum_valid_until_ts,
key_ids=list(key_ids_to_find),
)
found_keys_by_server = await self._fetch_keys_queue.add_to_queue(
found_keys_by_server = await self._server_queue.add_to_queue(
key_request, key=verify_request.server_name
)
@@ -356,17 +352,7 @@ class Keyring:
async def _inner_fetch_key_requests(
self, requests: List[_FetchKeyRequest]
) -> Dict[str, Dict[str, FetchKeyResult]]:
"""Processing function for the queue of `_FetchKeyRequest`.
Takes a list of key fetch requests, de-duplicates them and then carries out
each request by invoking self._inner_fetch_key_request.
Args:
requests: A list of requests for homeserver verify keys.
Returns:
{server name: {key id: fetch key result}}
"""
"""Processing function for the queue of `_FetchKeyRequest`."""
logger.debug("Starting fetch for %s", requests)
@@ -411,23 +397,8 @@ class Keyring:
async def _inner_fetch_key_request(
self, verify_request: _FetchKeyRequest
) -> Dict[str, FetchKeyResult]:
"""Attempt to fetch the given key by calling each key fetcher one by one.
If a key is found, check whether its `valid_until_ts` attribute satisfies the
`minimum_valid_until_ts` attribute of the `verify_request`. If it does, we
refrain from asking subsequent fetchers for that key.
Even if the above check fails, we still return the found key - the caller may
still find the invalid key result useful. In this case, we continue to ask
subsequent fetchers for the invalid key, in case they return a valid result
for it. This can happen when fetching a stale key result from the database,
before querying the origin server for an up-to-date result.
Args:
verify_request: The request for a verify key. Can include multiple key IDs.
Returns:
A map of {key_id: the key fetch result}.
"""Attempt to fetch the given key by calling each key fetcher one by
one.
"""
logger.debug("Starting fetch for %s", verify_request)
@@ -449,22 +420,26 @@ class Keyring:
if not key:
continue
# If we already have a result for the given key ID, we keep the
# If we already have a result for the given key ID we keep the
# one with the highest `valid_until_ts`.
existing_key = found_keys.get(key_id)
if existing_key and existing_key.valid_until_ts > key.valid_until_ts:
continue
if existing_key:
if key.valid_until_ts <= existing_key.valid_until_ts:
continue
# Check if this key's expiry timestamp is valid for the verify request.
if key.valid_until_ts >= verify_request.minimum_valid_until_ts:
# Stop looking for this key from subsequent fetchers.
missing_key_ids.discard(key_id)
# We always store the returned key even if it doesn't meet the
# We always store the returned key even if it doesn't meet the
# `minimum_valid_until_ts` requirement, as some verification
# requests may still be able to be satisfied by it.
#
# We still keep looking for the key from other fetchers in that
# case though.
found_keys[key_id] = key
if key.valid_until_ts < verify_request.minimum_valid_until_ts:
continue
missing_key_ids.discard(key_id)
return found_keys
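The merge rule in this loop, which keeps the freshest result per key ID whether or not it satisfies the request, can be sketched on its own. `FetchKeyResult` is reduced here to the one field that matters:

from dataclasses import dataclass
from typing import Dict

@dataclass
class FetchKeyResult:
    valid_until_ts: int  # reduced sketch; the real class also holds the key

def merge(found: Dict[str, FetchKeyResult], key_id: str, new: FetchKeyResult) -> None:
    # Keep whichever result for this key ID has the later expiry.
    existing = found.get(key_id)
    if existing is not None and new.valid_until_ts <= existing.valid_until_ts:
        return
    found[key_id] = new

found: Dict[str, FetchKeyResult] = {}
merge(found, "ed25519:a", FetchKeyResult(valid_until_ts=100))
merge(found, "ed25519:a", FetchKeyResult(valid_until_ts=50))
assert found["ed25519:a"].valid_until_ts == 100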

View File

@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import logging
import typing
from typing import (
@@ -875,11 +874,11 @@ def _check_power_levels(
"kick",
"invite",
}:
if type(v) is not int:
if not isinstance(v, int):
raise SynapseError(400, f"{v!r} must be an integer.")
if k in {"events", "notifications", "users"}:
if not isinstance(v, collections.abc.Mapping) or not all(
type(v) is int for v in v.values()
if not isinstance(v, dict) or not all(
isinstance(v, int) for v in v.values()
):
raise SynapseError(
400,

View File

@@ -605,11 +605,10 @@ class EventClientSerializer:
_PowerLevel = Union[str, int]
PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_and_fixup_power_levels_contents(
old_power_levels: PowerLevelsContent,
old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
) -> Dict[str, Union[int, Dict[str, int]]]:
"""Copy the content of a power_levels event, unfreezing frozendicts along the way.
@@ -648,10 +647,10 @@ def _copy_power_level_value_as_integer(
) -> None:
"""Set `power_levels[key]` to the integer represented by `old_value`.
:raises TypeError: if `old_value` is neither an integer nor a base-10 string
:raises TypeError: if `old_value` is not an integer, nor a base-10 string
representation of an integer.
"""
if type(old_value) is int:
if isinstance(old_value, int):
power_levels[key] = old_value
return
@@ -679,7 +678,7 @@ def validate_canonicaljson(value: Any) -> None:
* Floats
* NaN, Infinity, -Infinity
"""
if type(value) is int:
if isinstance(value, int):
if value < CANONICALJSON_MIN_INT or CANONICALJSON_MAX_INT < value:
raise SynapseError(400, "JSON integer out of range", Codes.BAD_JSON)

View File

@@ -139,7 +139,7 @@ class EventValidator:
max_lifetime = event.content.get("max_lifetime")
if min_lifetime is not None:
if type(min_lifetime) is not int:
if not isinstance(min_lifetime, int):
raise SynapseError(
code=400,
msg="'min_lifetime' must be an integer",
@@ -147,7 +147,7 @@ class EventValidator:
)
if max_lifetime is not None:
if type(max_lifetime) is not int:
if not isinstance(max_lifetime, int):
raise SynapseError(
code=400,
msg="'max_lifetime' must be an integer",

View File

@@ -280,7 +280,7 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB
_strip_unsigned_values(pdu_json)
depth = pdu_json["depth"]
if type(depth) is not int:
if not isinstance(depth, int):
raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON)
if depth < 0:

View File

@@ -19,7 +19,6 @@ import itertools
import logging
from typing import (
TYPE_CHECKING,
AbstractSet,
Awaitable,
Callable,
Collection,
@@ -38,7 +37,7 @@ from typing import (
import attr
from prometheus_client import Counter
from synapse.api.constants import Direction, EventContentFields, EventTypes, Membership
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import (
CodeMessageException,
Codes,
@@ -111,9 +110,8 @@ class SendJoinResult:
# True if 'state' elides non-critical membership events
partial_state: bool
# If 'partial_state' is set, a set of the servers in the room (otherwise empty).
# Always contains the server we joined off.
servers_in_room: AbstractSet[str]
# if 'partial_state' is set, a list of the servers in the room (otherwise empty)
servers_in_room: List[str]
class FederationClient(FederationBase):
@@ -1016,11 +1014,7 @@ class FederationClient(FederationBase):
)
async def send_join(
self,
destinations: Iterable[str],
pdu: EventBase,
room_version: RoomVersion,
partial_state: bool = True,
self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
) -> SendJoinResult:
"""Sends a join event to one of a list of homeservers.
@@ -1033,10 +1027,6 @@ class FederationClient(FederationBase):
pdu: event to be sent
room_version: the version of the room (according to the server that
did the make_join)
partial_state: whether to ask the remote server to omit membership state
events from the response. If the remote server complies,
`partial_state` in the send join result will be set. Defaults to
`True`.
Returns:
The result of the send join request.
@@ -1047,9 +1037,7 @@ class FederationClient(FederationBase):
"""
async def send_request(destination: str) -> SendJoinResult:
response = await self._do_send_join(
room_version, destination, pdu, omit_members=partial_state
)
response = await self._do_send_join(room_version, destination, pdu)
# If an event was returned (and expected to be returned):
#
@@ -1154,32 +1142,18 @@ class FederationClient(FederationBase):
% (auth_chain_create_events,)
)
servers_in_room = None
if response.servers_in_room is not None:
servers_in_room = set(response.servers_in_room)
if response.members_omitted:
if not servers_in_room:
raise InvalidResponseError(
"members_omitted was set, but no servers were listed in the room"
)
if not partial_state:
raise InvalidResponseError(
"members_omitted was set, but we asked for full state"
)
# `servers_in_room` is supposed to be a complete list.
# Fix things up in case the remote homeserver is badly behaved.
servers_in_room.add(destination)
if response.partial_state and not response.servers_in_room:
raise InvalidResponseError(
"partial_state was set, but no servers were listed in the room"
)
return SendJoinResult(
event=event,
state=signed_state,
auth_chain=signed_auth,
origin=destination,
partial_state=response.members_omitted,
servers_in_room=servers_in_room or frozenset(),
partial_state=response.partial_state,
servers_in_room=response.servers_in_room or [],
)
# MSC3083 defines additional error codes for room joins.
@@ -1203,11 +1177,7 @@ class FederationClient(FederationBase):
)
async def _do_send_join(
self,
room_version: RoomVersion,
destination: str,
pdu: EventBase,
omit_members: bool,
self, room_version: RoomVersion, destination: str, pdu: EventBase
) -> SendJoinResponse:
time_now = self._clock.time_msec()
@@ -1218,7 +1188,6 @@ class FederationClient(FederationBase):
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
omit_members=omit_members,
)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised endpoint,
@@ -1691,12 +1660,7 @@ class FederationClient(FederationBase):
return result
async def timestamp_to_event(
self,
*,
destinations: List[str],
room_id: str,
timestamp: int,
direction: Direction,
self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
) -> Optional["TimestampToEventResponse"]:
"""
Calls each remote federating server from `destinations` asking for their closest
@@ -1709,7 +1673,7 @@ class FederationClient(FederationBase):
room_id: Room to fetch the event from
timestamp: The point in time (inclusive) we should navigate from in
the given direction to find the closest event.
direction: indicates whether we should navigate forward
direction: ["f"|"b"] to indicate whether we should navigate forward
or backward from the given timestamp to find the closest event.
Returns:
@@ -1754,7 +1718,7 @@ class FederationClient(FederationBase):
return None
async def _timestamp_to_event_from_destination(
self, destination: str, room_id: str, timestamp: int, direction: Direction
self, destination: str, room_id: str, timestamp: int, direction: str
) -> "TimestampToEventResponse":
"""
Calls a remote federating server at `destination` asking for their
@@ -1767,7 +1731,7 @@ class FederationClient(FederationBase):
room_id: Room to fetch the event from
timestamp: The point in time (inclusive) we should navigate from in
the given direction to find the closest event.
direction: indicates whether we should navigate forward
direction: ["f"|"b"] to indicate whether we should navigate forward
or backward from the given timestamp to find the closest event.
Returns:
@@ -1880,7 +1844,7 @@ class TimestampToEventResponse:
)
origin_server_ts = d.get("origin_server_ts")
if type(origin_server_ts) is not int:
if not isinstance(origin_server_ts, int):
raise ValueError(
"Invalid response: 'origin_server_ts' must be a int but received %r"
% origin_server_ts

View File

@@ -34,13 +34,7 @@ from prometheus_client import Counter, Gauge, Histogram
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import (
Direction,
EduTypes,
EventContentFields,
EventTypes,
Membership,
)
from synapse.api.constants import EduTypes, EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
@@ -68,9 +62,7 @@ from synapse.logging.context import (
run_in_background,
)
from synapse.logging.opentracing import (
SynapseTags,
log_kv,
set_tag,
start_active_span_from_edu,
tag_args,
trace,
@@ -224,7 +216,7 @@ class FederationServer(FederationBase):
return 200, res
async def on_timestamp_to_event_request(
self, origin: str, room_id: str, timestamp: int, direction: Direction
self, origin: str, room_id: str, timestamp: int, direction: str
) -> Tuple[int, Dict[str, Any]]:
"""When we receive a federated `/timestamp_to_event` request,
handle all of the logic for validating and fetching the event.
@@ -234,7 +226,7 @@ class FederationServer(FederationBase):
room_id: Room to fetch the event from
timestamp: The point in time (inclusive) we should navigate from in
the given direction to find the closest event.
direction: indicates whether we should navigate forward
direction: ["f"|"b"] to indicate whether we should navigate forward
or backward from the given timestamp to find the closest event.
Returns:
@@ -686,10 +678,6 @@ class FederationServer(FederationBase):
room_id: str,
caller_supports_partial_state: bool = False,
) -> Dict[str, Any]:
set_tag(
SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE,
caller_supports_partial_state,
)
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
requester=None,
key=room_id,
@@ -737,12 +725,10 @@ class FederationServer(FederationBase):
"state": [p.get_pdu_json(time_now) for p in state_events],
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
"org.matrix.msc3706.partial_state": caller_supports_partial_state,
"members_omitted": caller_supports_partial_state,
}
if servers_in_room is not None:
resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room)
resp["servers_in_room"] = list(servers_in_room)
return resp
@@ -1514,7 +1500,7 @@ def _get_event_ids_for_partial_state_join(
prev_state_ids: StateMap[str],
summary: Dict[str, MemberSummary],
) -> Collection[str]:
"""Calculate state to be returned in a partial_state send_join
"""Calculate state to be retuned in a partial_state send_join
Args:
join_event: the join event being send_joined

View File

@@ -447,7 +447,7 @@ class FederationSender(AbstractFederationSender):
)
)
if partial_state_destinations is not None:
if len(partial_state_destinations) > 0:
destinations = partial_state_destinations
if destinations is None:

View File

@@ -32,7 +32,7 @@ from typing import (
import attr
import ijson
from synapse.api.constants import Direction, Membership
from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
@@ -102,10 +102,6 @@ class TransportLayerClient:
destination,
path=path,
args={"event_id": event_id},
# This can take a looooooong time for large rooms. Give this a generous
# timeout of 10 minutes to avoid the partial state resync timing out early
# and trying a bunch of servers who haven't seen our join yet.
timeout=600_000,
parser=_StateParser(room_version),
)
@@ -169,7 +165,7 @@ class TransportLayerClient:
)
async def timestamp_to_event(
self, destination: str, room_id: str, timestamp: int, direction: Direction
self, destination: str, room_id: str, timestamp: int, direction: str
) -> Union[JsonDict, List]:
"""
Calls a remote federating server at `destination` asking for their
@@ -180,7 +176,7 @@ class TransportLayerClient:
room_id: Room to fetch the event from
timestamp: The point in time (inclusive) we should navigate from in
the given direction to find the closest event.
direction: indicates whether we should navigate forward
direction: ["f"|"b"] to indicate whether we should navigate forward
or backward from the given timestamp to find the closest event.
Returns:
@@ -194,7 +190,7 @@ class TransportLayerClient:
room_id,
)
args = {"ts": [str(timestamp)], "dir": [direction.value]}
args = {"ts": [str(timestamp)], "dir": [direction]}
remote_response = await self.client.get_json(
destination, path=path, args=args, try_trailing_slash_on_400=True
@@ -355,16 +351,12 @@ class TransportLayerClient:
room_id: str,
event_id: str,
content: JsonDict,
omit_members: bool,
) -> "SendJoinResponse":
path = _create_v2_path("/send_join/%s/%s", room_id, event_id)
query_params: Dict[str, str] = {}
if self._faster_joins_enabled:
# lazy-load state on join
query_params["org.matrix.msc3706.partial_state"] = (
"true" if omit_members else "false"
)
query_params["omit_members"] = "true" if omit_members else "false"
query_params["org.matrix.msc3706.partial_state"] = "true"
return await self.client.put_json(
destination=destination,
@@ -802,7 +794,7 @@ class SendJoinResponse:
event: Optional[EventBase] = None
# The room state is incomplete
members_omitted: bool = False
partial_state: bool = False
# List of servers in the room
servers_in_room: Optional[List[str]] = None
@@ -842,18 +834,16 @@ def _event_list_parser(
@ijson.coroutine
def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any, None]:
def _partial_state_parser(response: SendJoinResponse) -> Generator[None, Any, None]:
"""Helper function for use with `ijson.items_coro`
Parses the members_omitted field in send_join responses
Parses the partial_state field in send_join responses
"""
while True:
val = yield
if not isinstance(val, bool):
raise TypeError(
"members_omitted (formerly org.matrix.msc370c.partial_state) must be a boolean"
)
response.members_omitted = val
raise TypeError("partial_state must be a boolean")
response.partial_state = val
@ijson.coroutine
@@ -914,19 +904,11 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
if not v1_api:
self._coros.append(
ijson.items_coro(
_members_omitted_parser(self._response),
_partial_state_parser(self._response),
"org.matrix.msc3706.partial_state",
use_float="True",
)
)
# The stable field name comes last, so it "wins" if the fields disagree
self._coros.append(
ijson.items_coro(
_members_omitted_parser(self._response),
"members_omitted",
use_float="True",
)
)
self._coros.append(
ijson.items_coro(
@@ -936,15 +918,6 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
)
)
# Again, stable field name comes last
self._coros.append(
ijson.items_coro(
_servers_in_room_parser(self._response),
"servers_in_room",
use_float="True",
)
)
def write(self, data: bytes) -> int:
for c in self._coros:
c.send(data)
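The parser built here is push-based: `write()` feeds raw bytes into each ijson coroutine, which fires whenever its JSON path matches. A minimal sketch of one such coroutine, assuming the `ijson` package is available:

import ijson

@ijson.coroutine
def bool_field_parser(out):
    # Receives each value parsed at the matching JSON path.
    while True:
        val = yield
        if not isinstance(val, bool):
            raise TypeError("members_omitted must be a boolean")
        out.append(val)

flags = []
coro = ijson.items_coro(bool_field_parser(flags), "members_omitted")
coro.send(b'{"members_omit')   # the body can arrive in arbitrary chunks
coro.send(b'ted": true}')
coro.close()
assert flags == [True]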

View File

@@ -26,7 +26,7 @@ from typing import (
from typing_extensions import Literal
from synapse.api.constants import Direction, EduTypes
from synapse.api.constants import EduTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
@@ -234,10 +234,9 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
room_id: str,
) -> Tuple[int, JsonDict]:
timestamp = parse_integer_from_args(query, "ts", required=True)
direction_str = parse_string_from_args(
query, "dir", allowed_values=["f", "b"], required=True
direction = parse_string_from_args(
query, "dir", default="f", allowed_values=["f", "b"], required=True
)
direction = Direction(direction_str)
return await self.handler.on_timestamp_to_event_request(
origin, room_id, timestamp, direction
@@ -423,7 +422,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._read_msc3706_query_param = hs.config.experimental.msc3706_enabled
self._msc3706_enabled = hs.config.experimental.msc3706_enabled
async def on_PUT(
self,
@@ -437,16 +436,10 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
# match those given in content
partial_state = False
# The stable query parameter wins, if it disagrees with the unstable
# parameter for some reason.
stable_param = parse_boolean_from_args(query, "omit_members", default=None)
if stable_param is not None:
partial_state = stable_param
elif self._read_msc3706_query_param:
if self._msc3706_enabled:
partial_state = parse_boolean_from_args(
query, "org.matrix.msc3706.partial_state", default=False
)
result = await self.handler.on_send_join_request(
origin, content, room_id, caller_supports_partial_state=partial_state
)

View File

@@ -14,9 +14,8 @@
# limitations under the License.
import logging
import random
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
from typing import TYPE_CHECKING, Awaitable, Callable, Collection, List, Optional, Tuple
from synapse.api.constants import AccountDataTypes
from synapse.replication.http.account_data import (
ReplicationAddRoomAccountDataRestServlet,
ReplicationAddTagRestServlet,
@@ -26,7 +25,7 @@ from synapse.replication.http.account_data import (
ReplicationRemoveUserAccountDataRestServlet,
)
from synapse.streams import EventSource
from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID
from synapse.types import JsonDict, StreamKeyType, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -314,7 +313,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
def get_current_key(self) -> int:
def get_current_key(self, direction: str = "f") -> int:
return self.store.get_max_account_data_stream_id()
async def get_new_events(
@@ -322,7 +321,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]):
user: UserID,
from_key: int,
limit: int,
room_ids: StrCollection,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
) -> Tuple[List[JsonDict], int]:
@@ -336,11 +335,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]):
for room_id, room_tags in tags.items():
results.append(
{
"type": AccountDataTypes.TAG,
"content": {"tags": room_tags},
"room_id": room_id,
}
{"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id}
)
(

View File

@@ -16,7 +16,7 @@ import abc
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set
from synapse.api.constants import Direction, Membership
from synapse.api.constants import Membership
from synapse.events import EventBase
from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID
from synapse.visibility import filter_events_for_client
@@ -30,7 +30,6 @@ logger = logging.getLogger(__name__)
class AdminHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
@@ -198,7 +197,7 @@ class AdminHandler:
# efficient method perhaps but it does guarantee we get everything.
while True:
events, _ = await self.store.paginate_room_events(
room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS
room_id, from_key, to_key, limit=100, direction="f"
)
if not events:
break
@@ -248,21 +247,6 @@ class AdminHandler:
)
writer.write_state(room_id, event_id, state)
# Get the user profile
profile = await self.get_user(UserID.from_string(user_id))
if profile is not None:
writer.write_profile(profile)
# Get all devices the user has
devices = await self._device_handler.get_devices_by_user(user_id)
writer.write_devices(devices)
# Get all connections the user has
connections = await self.get_whois(UserID.from_string(user_id))
writer.write_connections(
connections["devices"][""]["sessions"][0]["connections"]
)
return writer.finished()
@@ -313,33 +297,6 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
"""
raise NotImplementedError()
@abc.abstractmethod
def write_profile(self, profile: JsonDict) -> None:
"""Write the profile of a user.
Args:
profile: The user profile.
"""
raise NotImplementedError()
@abc.abstractmethod
def write_devices(self, devices: List[JsonDict]) -> None:
"""Write the devices of a user.
Args:
devices: The list of devices.
"""
raise NotImplementedError()
@abc.abstractmethod
def write_connections(self, connections: List[JsonDict]) -> None:
"""Write the connections of a user.
Args:
connections: The list of connections / sessions.
"""
raise NotImplementedError()
@abc.abstractmethod
def finished(self) -> Any:
"""Called when all data has successfully been exported and written.

View File

@@ -18,6 +18,7 @@ from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
Iterable,
List,
@@ -44,7 +45,6 @@ from synapse.metrics.background_process_metrics import (
)
from synapse.types import (
JsonDict,
StrCollection,
StreamKeyType,
StreamToken,
UserID,
@@ -146,7 +146,7 @@ class DeviceWorkerHandler:
@cancellable
async def get_device_changes_in_shared_rooms(
self, user_id: str, room_ids: StrCollection, from_token: StreamToken
self, user_id: str, room_ids: Collection[str], from_token: StreamToken
) -> Set[str]:
"""Get the set of users whose devices have changed who share a room with
the given user.
@@ -346,7 +346,6 @@ class DeviceHandler(DeviceWorkerHandler):
super().__init__(hs)
self.federation_sender = hs.get_federation_sender()
self._account_data_handler = hs.get_account_data_handler()
self._storage_controllers = hs.get_storage_controllers()
self.device_list_updater = DeviceListUpdater(hs, self)
@@ -503,7 +502,7 @@ class DeviceHandler(DeviceWorkerHandler):
else:
raise
# Delete data specific to each device. Not optimised as it is not
# Delete access tokens and e2e keys for each device. Not optimised as it is not
# considered as part of a critical path.
for device_id in device_ids:
await self._auth_handler.delete_access_tokens_for_user(
@@ -513,14 +512,6 @@ class DeviceHandler(DeviceWorkerHandler):
user_id=user_id, device_id=device_id
)
if self.hs.config.experimental.msc3890_enabled:
# Remove any local notification settings for this device in accordance
# with MSC3890.
await self._account_data_handler.remove_account_data_for_user(
user_id,
f"org.matrix.msc3890.local_notification_settings.{device_id}",
)
await self.notify_device_update(user_id, device_ids)
async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
@@ -551,7 +542,7 @@ class DeviceHandler(DeviceWorkerHandler):
@trace
@measure_func("notify_device_update")
async def notify_device_update(
self, user_id: str, device_ids: StrCollection
self, user_id: str, device_ids: Collection[str]
) -> None:
"""Notify that a user's device(s) has changed. Pokes the notifier, and
remote servers if the user is local.
@@ -859,7 +850,6 @@ class DeviceHandler(DeviceWorkerHandler):
known_hosts_at_join = await self.store.get_partial_state_servers_at_join(
room_id
)
assert known_hosts_at_join is not None
potentially_changed_hosts.difference_update(known_hosts_at_join)
potentially_changed_hosts.discard(self.server_name)
@@ -975,7 +965,6 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
self.federation = hs.get_federation_client()
self.clock = hs.get_clock()
self.device_handler = device_handler
self._notifier = hs.get_notifier()
self._remote_edu_linearizer = Linearizer(name="remote_device_list")
@@ -1056,7 +1045,6 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
user_id,
device_id,
)
self._notifier.notify_replication()
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Mapping, Optional, Union
from typing import TYPE_CHECKING, Collection, List, Mapping, Optional, Union
from synapse import event_auth
from synapse.api.constants import (
@@ -29,7 +29,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.types import StateMap, StrCollection, get_domain_from_id
from synapse.types import StateMap, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -290,7 +290,7 @@ class EventAuthHandler:
async def get_rooms_that_allow_join(
self, state_ids: StateMap[str]
) -> StrCollection:
) -> Collection[str]:
"""
Generate a list of rooms in which membership allows access to a room.
@@ -331,7 +331,7 @@ class EventAuthHandler:
return result
async def is_user_in_rooms(self, room_ids: StrCollection, user_id: str) -> bool:
async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool:
"""
Check whether a user is a member of any of the provided rooms.

View File

@@ -22,12 +22,11 @@ from enum import Enum
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
AbstractSet,
Collection,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
@@ -48,6 +47,7 @@ from synapse.api.errors import (
FederationError,
FederationPullAttemptBackoffError,
HttpResponseException,
LimitExceededError,
NotFoundError,
RequestSendFailed,
SynapseError,
@@ -70,7 +70,7 @@ from synapse.replication.http.federation import (
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import JsonDict, StrCollection, get_domain_from_id
from synapse.types import JsonDict, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
@@ -171,29 +171,12 @@ class FederationHandler:
self.third_party_event_rules = hs.get_third_party_event_rules()
# Tracks running partial state syncs by room ID.
# Partial state syncs currently only run on the main process, so it's okay to
# track them in-memory for now.
self._active_partial_state_syncs: Set[str] = set()
# Tracks partial state syncs we may want to restart.
# A dictionary mapping room IDs to (initial destination, other destinations)
# tuples.
self._partial_state_syncs_maybe_needing_restart: Dict[
str, Tuple[Optional[str], AbstractSet[str]]
] = {}
# A lock guarding the partial state flag for rooms.
# When the lock is held for a given room, no other concurrent code may
# partial state or un-partial state the room.
self._is_partial_state_room_linearizer = Linearizer(
name="_is_partial_state_room_linearizer"
)
# if this is the main process, fire off a background process to resume
# any partial-state-resync operations which were in flight when we
# were shut down.
if not hs.config.worker.worker_app:
run_as_background_process(
"resume_sync_partial_state_room", self._resume_partial_state_room_sync
"resume_sync_partial_state_room", self._resume_sync_partial_state_room
)
@trace
@@ -437,7 +420,7 @@ class FederationHandler:
)
)
async def try_backfill(domains: StrCollection) -> bool:
async def try_backfill(domains: Collection[str]) -> bool:
# TODO: Should we try multiple of these at a time?
# Number of contacted remote homeservers that have denied our backfill
@@ -604,23 +587,7 @@ class FederationHandler:
self._federation_event_handler.room_queues[room_id] = []
is_host_joined = await self.store.is_host_joined(room_id, self.server_name)
if not is_host_joined:
# We may have old forward extremities lying around if the homeserver left
# the room completely in the past. Clear them out.
#
# Note that this check-then-clear is subject to races where
# * the homeserver is in the room and stops being in the room just after
# the check. We won't reset the forward extremities, but that's okay,
# since they will be almost up to date.
# * the homeserver is not in the room and starts being in the room just
# after the check. This can't happen, since `RoomMemberHandler` has a
# linearizer lock which prevents concurrent remote joins into the same
# room.
# In short, the races either have an acceptable outcome or should be
# impossible.
await self._clean_room_for_join(room_id)
await self._clean_room_for_join(room_id)
try:
# Try the host we successfully got a response to /make_join/
@@ -632,115 +599,93 @@ class FederationHandler:
except ValueError:
pass
async with self._is_partial_state_room_linearizer.queue(room_id):
already_partial_state_room = await self.store.is_partial_state_room(
room_id
)
ret = await self.federation_client.send_join(
host_list, event, room_version_obj
)
ret = await self.federation_client.send_join(
host_list,
event,
room_version_obj,
# Perform a full join when we are already in the room and it is a
# full state room, since we are not allowed to persist a partial
# state join event in a full state room. In the future, we could
# optimize this by always performing a partial state join and
# computing the state ourselves or retrieving it from the remote
# homeserver if necessary.
#
# There's a race where we leave the room, then perform a full join
# anyway. This should end up being fast anyway, since we would
# already have the full room state and auth chain persisted.
partial_state=not is_host_joined or already_partial_state_room,
)
event = ret.event
origin = ret.origin
state = ret.state
auth_chain = ret.auth_chain
auth_chain.sort(key=lambda e: e.depth)
event = ret.event
origin = ret.origin
state = ret.state
auth_chain = ret.auth_chain
auth_chain.sort(key=lambda e: e.depth)
logger.debug("do_invite_join auth_chain: %s", auth_chain)
logger.debug("do_invite_join state: %s", state)
logger.debug("do_invite_join auth_chain: %s", auth_chain)
logger.debug("do_invite_join state: %s", state)
logger.debug("do_invite_join event: %s", event)
logger.debug("do_invite_join event: %s", event)
# if this is the first time we've joined this room, it's time to add
# a row to `rooms` with the correct room version. If there's already a
# row there, we should override it, since it may have been populated
# based on an invite request which lied about the room version.
#
# federation_client.send_join has already checked that the room
# version in the received create event is the same as room_version_obj,
# so we can rely on it now.
#
await self.store.upsert_room_on_join(
room_id=room_id,
room_version=room_version_obj,
state_events=state,
)
# if this is the first time we've joined this room, it's time to add
# a row to `rooms` with the correct room version. If there's already a
# row there, we should override it, since it may have been populated
# based on an invite request which lied about the room version.
#
# federation_client.send_join has already checked that the room
# version in the received create event is the same as room_version_obj,
# so we can rely on it now.
#
await self.store.upsert_room_on_join(
if ret.partial_state:
# Mark the room as having partial state.
# The background process is responsible for unmarking this flag,
# even if the join fails.
await self.store.store_partial_state_room(
room_id=room_id,
room_version=room_version_obj,
state_events=state,
servers=ret.servers_in_room,
device_lists_stream_id=self.store.get_device_stream_token(),
joined_via=origin,
)
if ret.partial_state and not already_partial_state_room:
# Mark the room as having partial state.
# The background process is responsible for unmarking this flag,
# even if the join fails.
# TODO(faster_joins):
# We may want to reset the partial state info if it's from an
# old, failed partial state join.
# https://github.com/matrix-org/synapse/issues/13000
await self.store.store_partial_state_room(
room_id=room_id,
servers=ret.servers_in_room,
device_lists_stream_id=self.store.get_device_stream_token(),
joined_via=origin,
)
try:
max_stream_id = (
await self._federation_event_handler.process_remote_join(
origin,
room_id,
auth_chain,
state,
event,
room_version_obj,
partial_state=ret.partial_state,
)
)
except PartialStateConflictError:
# This should be impossible, since we hold the lock on the room's
# partial statedness.
logger.error(
"Room %s was un-partial stated while processing remote join.",
try:
max_stream_id = (
await self._federation_event_handler.process_remote_join(
origin,
room_id,
auth_chain,
state,
event,
room_version_obj,
partial_state=ret.partial_state,
)
raise
else:
# Record the join event id for future use (when we finish the full
# join). We have to do this after persisting the event to keep
# foreign key constraints intact.
if ret.partial_state and not already_partial_state_room:
# TODO(faster_joins):
# We may want to reset the partial state info if it's from
# an old, failed partial state join.
# https://github.com/matrix-org/synapse/issues/13000
await self.store.write_partial_state_rooms_join_event_id(
room_id, event.event_id
)
finally:
# Always kick off the background process that asynchronously fetches
# state for the room.
# If the join failed, the background process is responsible for
# cleaning up — including unmarking the room as a partial state room.
)
except PartialStateConflictError as e:
# The homeserver was already in the room and it is no longer partial
# stated. We ought to be doing a local join instead. Turn the error into
# a 429, as a hint to the client to try again.
# TODO(faster_joins): `_should_perform_remote_join` suggests that we may
# do a remote join for restricted rooms even if we have full state.
logger.error(
"Room %s was un-partial stated while processing remote join.",
room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
else:
# Record the join event id for future use (when we finish the full
# join). We have to do this after persisting the event to keep foreign
# key constraints intact.
if ret.partial_state:
await self.store.write_partial_state_rooms_join_event_id(
room_id, event.event_id
)
finally:
# Always kick off the background process that asynchronously fetches
# state for the room.
# If the join failed, the background process is responsible for
# cleaning up — including unmarking the room as a partial state room.
if ret.partial_state:
# Kick off the process of asynchronously fetching the state for this
# room.
if ret.partial_state:
# Kick off the process of asynchronously fetching the state for
# this room.
self._start_partial_state_room_sync(
initial_destination=origin,
other_destinations=ret.servers_in_room,
room_id=room_id,
)
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
initial_destination=origin,
other_destinations=ret.servers_in_room,
room_id=room_id,
)
# We wait here until this instance has seen the events come down
# replication (if we're using replication) as the below uses caches.
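The control flow in that hunk is easier to see without the diff noise: persist the join, record the join event id only if persistence succeeded, and always hand cleanup to the background resync. Below is a minimal sketch of that shape, using hypothetical stand-ins (`ConflictError`, `RetryableError`, `process_join`, `start_resync`) rather than Synapse's real API:

class ConflictError(Exception):
    """Stand-in for PartialStateConflictError."""


class RetryableError(Exception):
    """Stand-in for the 429 LimitExceededError sent back to the client."""

    def __init__(self, retry_after_ms: int) -> None:
        super().__init__(f"retry after {retry_after_ms}ms")
        self.retry_after_ms = retry_after_ms


async def remote_join(store, process_join, start_resync, room_id, event) -> None:
    try:
        await process_join(event)
    except ConflictError as e:
        # The room was un-partial stated under us; hint the client to retry,
        # at which point it should take the local-join path instead.
        raise RetryableError(retry_after_ms=0) from e
    else:
        # Record the join event id only after the event has been persisted,
        # so the foreign key constraint on the events table holds.
        await store.write_join_event_id(room_id, event.event_id)
    finally:
        # Success or failure, the background process owns the partial-state
        # cleanup, so it must always be kicked off.
        start_resync(room_id)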
@@ -1715,104 +1660,24 @@ class FederationHandler:
# well.
return None
-    async def _resume_partial_state_room_sync(self) -> None:
+    async def _resume_sync_partial_state_room(self) -> None:
"""Resumes resyncing of all partial-state rooms after a restart."""
assert not self.config.worker.worker_app
partial_state_rooms = await self.store.get_partial_state_room_resync_info()
for room_id, resync_info in partial_state_rooms.items():
-            self._start_partial_state_room_sync(
+            run_as_background_process(
+                desc="sync_partial_state_room",
+                func=self._sync_partial_state_room,
initial_destination=resync_info.joined_via,
other_destinations=resync_info.servers_in_room,
room_id=room_id,
)
-    def _start_partial_state_room_sync(
-        self,
-        initial_destination: Optional[str],
-        other_destinations: AbstractSet[str],
-        room_id: str,
-    ) -> None:
-        """Starts the background process to resync the state of a partial state room,
-        if it is not already running.
-
-        Args:
-            initial_destination: the initial homeserver to pull the state from
-            other_destinations: other homeservers to try to pull the state from, if
-                `initial_destination` is unavailable
-            room_id: room to be resynced
-        """
-
-        async def _sync_partial_state_room_wrapper() -> None:
-            if room_id in self._active_partial_state_syncs:
-                # Another local user has joined the room while there is already a
-                # partial state sync running. This implies that there is a new join
-                # event to un-partial state. We might find ourselves in one of a few
-                # scenarios:
-                #  1. There is an existing partial state sync. The partial state sync
-                #     un-partial states the new join event before completing and all is
-                #     well.
-                #  2. Before the latest join, the homeserver was no longer in the room
-                #     and there is an existing partial state sync from our previous
-                #     membership of the room. The partial state sync may have:
-                #     a) succeeded, but not yet terminated. The room will not be
-                #        un-partial stated again unless we restart the partial state
-                #        sync.
-                #     b) failed, because we were no longer in the room and remote
-                #        homeservers were refusing our requests, but not yet
-                #        terminated. After the latest join, remote homeservers may
-                #        start answering our requests again, so we should restart the
-                #        partial state sync.
-                # In the cases where we would want to restart the partial state sync,
-                # the room would have the partial state flag when the partial state sync
-                # terminates.
-                self._partial_state_syncs_maybe_needing_restart[room_id] = (
-                    initial_destination,
-                    other_destinations,
-                )
-                return
-
-            self._active_partial_state_syncs.add(room_id)
-
-            try:
-                await self._sync_partial_state_room(
-                    initial_destination=initial_destination,
-                    other_destinations=other_destinations,
-                    room_id=room_id,
-                )
-            finally:
-                # Read the room's partial state flag while we still hold the claim to
-                # being the active partial state sync (so that another partial state
-                # sync can't come along and mess with it under us).
-                # Normally, the partial state flag will be gone. If it isn't, then we
-                # may find ourselves in scenario 2a or 2b as described in the comment
-                # above, where we want to restart the partial state sync.
-                is_still_partial_state_room = await self.store.is_partial_state_room(
-                    room_id
-                )
-                self._active_partial_state_syncs.remove(room_id)
-
-                if room_id in self._partial_state_syncs_maybe_needing_restart:
-                    (
-                        restart_initial_destination,
-                        restart_other_destinations,
-                    ) = self._partial_state_syncs_maybe_needing_restart.pop(room_id)
-
-                    if is_still_partial_state_room:
-                        self._start_partial_state_room_sync(
-                            initial_destination=restart_initial_destination,
-                            other_destinations=restart_other_destinations,
-                            room_id=room_id,
-                        )
-
-        run_as_background_process(
-            desc="sync_partial_state_room", func=_sync_partial_state_room_wrapper
-        )
async def _sync_partial_state_room(
self,
initial_destination: Optional[str],
-        other_destinations: AbstractSet[str],
+        other_destinations: Collection[str],
room_id: str,
) -> None:
"""Background process to resync the state of a partial-state room
@@ -1823,12 +1688,6 @@ class FederationHandler:
`initial_destination` is unavailable
room_id: room to be resynced
"""
-        # Assume that we run on the main process for now.
-        # TODO(faster_joins,multiple workers)
-        #   When moving the sync to workers, we need to ensure that
-        #     * `_start_partial_state_room_sync` still prevents duplicate resyncs
-        #     * `_is_partial_state_room_linearizer` correctly guards partial state flags
-        #       for rooms between the workers doing remote joins and resync.
assert not self.config.worker.worker_app
# TODO(faster_joins): do we need to lock to avoid races? What happens if other
@@ -1866,19 +1725,20 @@ class FederationHandler:
logger.info("Handling any pending device list updates")
await self._device_handler.handle_room_un_partial_stated(room_id)
-        async with self._is_partial_state_room_linearizer.queue(room_id):
-            logger.info("Clearing partial-state flag for %s", room_id)
-            new_stream_id = await self.store.clear_partial_state_room(room_id)
-
-            if new_stream_id is not None:
+        logger.info("Clearing partial-state flag for %s", room_id)
+        success = await self.store.clear_partial_state_room(room_id)
+        if success:
             logger.info("State resync complete for %s", room_id)
             self._storage_controllers.state.notify_room_un_partial_stated(
                 room_id
             )

-                await self._notifier.on_un_partial_stated_room(
-                    room_id, new_stream_id
-                )
+            # Poke the notifier so that other workers see the write to
+            # the un-partial-stated rooms stream.
+            self._notifier.notify_replication()

             # TODO(faster_joins) update room stats and user directory?
             # https://github.com/matrix-org/synapse/issues/12814
             # https://github.com/matrix-org/synapse/issues/12815

             return
# we raced against more events arriving with partial state. Go round
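Both sides of this hunk share the same outer loop: resync, try to clear the flag, and go round again if more partial-state events raced in while we worked. Schematically (the callables here are hypothetical parameters, not Synapse functions):

from typing import Awaitable, Callable


async def resync_until_complete(
    compute_full_state: Callable[[str], Awaitable[None]],
    clear_partial_state: Callable[[str], Awaitable[bool]],
    room_id: str,
) -> None:
    # Keep going until the clear succeeds: new events with partial state may
    # arrive while we are resyncing, in which case we go round the loop again.
    while True:
        await compute_full_state(room_id)
        if await clear_partial_state(room_id):
            return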
@@ -1949,9 +1809,9 @@ class FederationHandler:
def _prioritise_destinations_for_partial_state_resync(
initial_destination: Optional[str],
-    other_destinations: AbstractSet[str],
+    other_destinations: Collection[str],
room_id: str,
-) -> StrCollection:
+) -> Collection[str]:
"""Work out the order in which we should ask servers to resync events.
If an `initial_destination` is given, it takes top priority. Otherwise

View File

@@ -80,7 +80,6 @@ from synapse.types import (
PersistedEventPosition,
RoomStreamToken,
StateMap,
-    StrCollection,
UserID,
get_domain_from_id,
)
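Most of the remaining hunks swap the `StrCollection` alias (develop side) back to `Collection[str]` (the older branch). The alias exists because a bare `str` is itself a `Collection[str]`, which makes it easy to pass a single string where a list of strings was meant. Its definition in `synapse.types` is roughly the following (reproduced from memory, so treat it as an approximation):

from typing import AbstractSet, List, Tuple, Union

# A str would satisfy Collection[str] (iterating it yields 1-character
# strings), so this alias enumerates the container types actually intended.
StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]]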
@@ -616,7 +615,7 @@ class FederationEventHandler:
@trace
async def backfill(
-        self, dest: str, room_id: str, limit: int, extremities: StrCollection
+        self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> None:
"""Trigger a backfill request to `dest` for the given `room_id`
@@ -1566,7 +1565,7 @@ class FederationEventHandler:
@trace
@tag_args
async def _get_events_and_persist(
-        self, destination: str, room_id: str, event_ids: StrCollection
+        self, destination: str, room_id: str, event_ids: Collection[str]
) -> None:
"""Fetch the given events from a server, and persist them as outliers.
@@ -2260,10 +2259,6 @@ class FederationEventHandler:
event_and_contexts, backfilled=backfilled
)
-        # After persistence we always need to notify replication there may
-        # be new data.
-        self._notifier.notify_replication()
if self._ephemeral_messages_enabled:
for event in events:
# If there's an expiry timestamp on the event, schedule its expiry.

View File

@@ -15,13 +15,7 @@
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
-from synapse.api.constants import (
-    AccountDataTypes,
-    Direction,
-    EduTypes,
-    EventTypes,
-    Membership,
-)
+from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig
@@ -63,13 +57,7 @@ class InitialSyncHandler:
self.validator = EventValidator()
self.snapshot_cache: ResponseCache[
Tuple[
-                str,
-                Optional[StreamToken],
-                Optional[StreamToken],
-                Direction,
-                int,
-                bool,
-                bool,
+                str, Optional[StreamToken], Optional[StreamToken], str, int, bool, bool
]
] = ResponseCache(hs.get_clock(), "initial_sync_cache")
self._event_serializer = hs.get_event_client_serializer()
@@ -251,7 +239,7 @@ class InitialSyncHandler:
tags = tags_by_room.get(event.room_id)
if tags:
account_data_events.append(
{"type": AccountDataTypes.TAG, "content": {"tags": tags}}
{"type": "m.tag", "content": {"tags": tags}}
)
account_data = account_data_by_room.get(event.room_id, {})
@@ -338,9 +326,7 @@ class InitialSyncHandler:
account_data_events = []
tags = await self.store.get_tags_for_room(user_id, room_id)
if tags:
-            account_data_events.append(
-                {"type": AccountDataTypes.TAG, "content": {"tags": tags}}
-            )
+            account_data_events.append({"type": "m.tag", "content": {"tags": tags}})
account_data = await self.store.get_account_data_for_room(user_id, room_id)
for account_data_type, content in account_data.items():
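The two hunks above replace the `"m.tag"` literal with a named constant. The value is pinned by the Matrix spec, so the constant mainly buys a single greppable name; schematically (a simplified stand-in for the real `synapse.api.constants` definition):

class AccountDataTypes:
    # In Synapse this lives in synapse.api.constants as a Final[str].
    TAG = "m.tag"


tags = {"u.work": {"order": 0.5}}
account_data_event = {"type": AccountDataTypes.TAG, "content": {"tags": tags}}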

View File

@@ -377,7 +377,7 @@ class MessageHandler:
"""
expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
-        if type(expiry_ts) is not int or event.is_state():
+        if not isinstance(expiry_ts, int) or event.is_state():
return
# _schedule_expiry_for_event won't actually schedule anything if there's already
@@ -1939,9 +1939,7 @@ class EventCreationHandler:
if event.type == EventTypes.Message:
# We don't want to block sending messages on any presence code. This
# matters as sometimes presence code can take a while.
-            run_as_background_process(
-                "bump_presence_active_time", self._bump_active_time, requester.user
-            )
+            run_in_background(self._bump_active_time, requester.user)
async def _notify() -> None:
try:
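The `type(expiry_ts) is not int` form on the develop side is deliberately stricter than `isinstance`: `bool` is a subclass of `int`, so `isinstance(True, int)` succeeds while the exact-type check rejects it. A quick demonstration:

expiry_ts = True  # a malformed self-destruct timestamp

# isinstance() accepts bools, because bool subclasses int:
assert isinstance(expiry_ts, int)

# the exact-type check does not:
assert type(expiry_ts) is not int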

View File

@@ -13,13 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Dict, List, Optional, Set
+from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set
import attr
from twisted.python.failure import Failure
-from synapse.api.constants import Direction, EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
@@ -28,7 +28,7 @@ from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
from synapse.streams.config import PaginationConfig
-from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType
+from synapse.types import JsonDict, Requester, StreamKeyType
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.stringutils import random_string
@@ -391,7 +391,7 @@ class PaginationHandler:
"""
return self._delete_by_id.get(delete_id)
-    def get_delete_ids_by_room(self, room_id: str) -> Optional[StrCollection]:
+    def get_delete_ids_by_room(self, room_id: str) -> Optional[Collection[str]]:
"""Get all active delete ids by room
Args:
@@ -448,7 +448,7 @@ class PaginationHandler:
if pagin_config.from_token:
from_token = pagin_config.from_token
-        elif pagin_config.direction == Direction.FORWARDS:
+        elif pagin_config.direction == "f":
from_token = (
await self.hs.get_event_sources().get_start_token_for_pagination(
room_id
@@ -476,7 +476,7 @@ class PaginationHandler:
room_id, requester, allow_departed_users=True
)
-        if pagin_config.direction == Direction.BACKWARDS:
+        if pagin_config.direction == "b":
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
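The `"f"`/`"b"` literals compared against `pagin_config.direction` were replaced on the develop side by a `Direction` enum in `synapse.api.constants`, which looks roughly like this (from memory; the one-letter strings remain the wire format):

import enum


class Direction(enum.Enum):
    BACKWARDS = "b"
    FORWARDS = "f"


# The enum values are still the one-letter strings used in the client API:
assert Direction.FORWARDS.value == "f"
assert Direction.BACKWARDS.value == "b"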

View File

@@ -64,13 +64,7 @@ from synapse.replication.tcp.commands import ClearUserSyncsCommand
from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
from synapse.storage.databases.main import DataStore
from synapse.streams import EventSource
-from synapse.types import (
-    JsonDict,
-    StrCollection,
-    StreamKeyType,
-    UserID,
-    get_domain_from_id,
-)
+from synapse.types import JsonDict, StreamKeyType, UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
@@ -326,7 +320,7 @@ class BasePresenceHandler(abc.ABC):
for destination, host_states in hosts_to_states.items():
self._federation.send_presence_to_destinations(host_states, [destination])
-    async def send_full_presence_to_users(self, user_ids: StrCollection) -> None:
+    async def send_full_presence_to_users(self, user_ids: Collection[str]) -> None:
"""
Adds to the list of users who should receive a full snapshot of presence
upon their next sync. Note that this only works for local users.
@@ -1607,7 +1601,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
# Having a default limit doesn't match the EventSource API, but some
# callers do not provide it. It is unused in this class.
limit: int = 0,
-        room_ids: Optional[StrCollection] = None,
+        room_ids: Optional[Collection[str]] = None,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
include_offline: bool = True,
@@ -1694,7 +1688,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
# The set of users that we're interested in and that have had a presence update.
# We'll actually pull the presence updates for these users at the end.
-        interested_and_updated_users: StrCollection
+        interested_and_updated_users: Collection[str]
if from_key is not None:
# First get all users that have had a presence update
@@ -2126,7 +2120,7 @@ class PresenceFederationQueue:
# stream_id, destinations, user_ids)`. We don't store the full states
# for efficiency, and remote workers will already have the full states
# cached.
-        self._queue: List[Tuple[int, int, StrCollection, Set[str]]] = []
+        self._queue: List[Tuple[int, int, Collection[str], Set[str]]] = []
self._next_id = 1
@@ -2148,7 +2142,7 @@ class PresenceFederationQueue:
self._queue = self._queue[index:]
def send_presence_to_destinations(
-        self, states: Collection[UserPresenceState], destinations: StrCollection
+        self, states: Collection[UserPresenceState], destinations: Collection[str]
) -> None:
"""Send the presence states to the given destinations.
@@ -2161,11 +2155,6 @@ class PresenceFederationQueue:
# This should only be called on a presence writer.
assert self._presence_writer
-        if not states or not destinations:
-            # Ignore calls which either don't have any new states or don't need
-            # to be sent anywhere.
-            return
if self._federation:
self._federation.send_presence_to_destinations(
states=states,

View File

@@ -315,5 +315,5 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
return events, to_key
-    def get_current_key(self) -> int:
+    def get_current_key(self, direction: str = "f") -> int:
return self.store.get_max_receipt_stream_id()

View File

@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, O
import attr
-from synapse.api.constants import Direction, EventTypes, RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.logging.context import make_deferred_yieldable, run_in_background
@@ -413,11 +413,7 @@ class RelationsHandler:
# Attempt to find another event to use as the latest event.
potential_events, _ = await self._main_store.get_relations_for_event(
-                event_id,
-                event,
-                room_id,
-                RelationTypes.THREAD,
-                direction=Direction.FORWARDS,
+                event_id, event, room_id, RelationTypes.THREAD, direction="f"
)
# Filter out ignored users.

View File

@@ -20,14 +20,22 @@ import random
import string
from collections import OrderedDict
from http import HTTPStatus
-from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Awaitable,
+    Collection,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+)
import attr
from typing_extensions import TypedDict
import synapse.events.snapshot
from synapse.api.constants import (
-    Direction,
EventContentFields,
EventTypes,
GuestAccess,
@@ -64,7 +72,6 @@ from synapse.types import (
RoomID,
RoomStreamToken,
StateMap,
-    StrCollection,
StreamKeyType,
StreamToken,
UserID,
@@ -1488,7 +1495,7 @@ class TimestampLookupHandler:
requester: Requester,
room_id: str,
timestamp: int,
-        direction: Direction,
+        direction: str,
) -> Tuple[str, int]:
"""Find the closest event to the given timestamp in the given direction.
If we can't find an event locally or the event we have locally is next to a gap,
@@ -1499,7 +1506,7 @@ class TimestampLookupHandler:
room_id: Room to fetch the event from
timestamp: The point in time (inclusive) we should navigate from in
the given direction to find the closest event.
-            direction: indicates whether we should navigate forward
+            direction: ["f"|"b"] to indicate whether we should navigate forward
or backward from the given timestamp to find the closest event.
Returns:
@@ -1534,13 +1541,13 @@ class TimestampLookupHandler:
local_event_id, allow_none=False, allow_rejected=False
)
-        if direction == Direction.FORWARDS:
+        if direction == "f":
# We only need to check for a backward gap if we're looking forwards
# to ensure there is nothing in between.
is_event_next_to_backward_gap = (
await self.store.is_event_next_to_backward_gap(local_event)
)
-        elif direction == Direction.BACKWARDS:
+        elif direction == "b":
# We only need to check for a forward gap if we're looking backwards
# to ensure there is nothing in between
is_event_next_to_forward_gap = (
@@ -1637,7 +1644,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
user: UserID,
from_key: RoomStreamToken,
limit: int,
-        room_ids: StrCollection,
+        room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
) -> Tuple[List[EventBase], RoomStreamToken]:

Some files were not shown because too many files have changed in this diff.