
Compare commits


3 Commits

Author | SHA1 | Message | Date
Andrew Morgan | 8acd2c01bc | lil fix | 2022-09-26 16:13:53 +01:00
Andrew Morgan | f1d98d3b70 | wip2 | 2022-09-22 15:54:30 +01:00
Andrew Morgan | 6ff8ba5fc6 | wip | 2022-09-21 17:37:38 +01:00
96 changed files with 580 additions and 1414 deletions

.ci/scripts/postgres_exec.py (new executable file, 31 lines)

@@ -0,0 +1,31 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import psycopg2
# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
cur.execute(c)
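For reference, the CI scripts later in this diff invoke the helper with one SQL statement per argument, relying on the hard-coded `postgres`/`localhost` credentials above:

```shell
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

# Multiple statements can be executed in a single invocation:
poetry run .ci/scripts/postgres_exec.py \
    "DROP DATABASE synapse" \
    "CREATE DATABASE synapse"
```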


@@ -32,7 +32,7 @@ else
fi
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"


@@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db. Checks that the
# return code is zero.
# - reruns the port script on the same sqlite db, targeting the same postgres db.
# Checks that the return code is zero.
# - runs the port script against a new sqlite db. Checks the return code is zero.
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db
#
# Expects Synapse to have already been installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe -o pipefail
set -xe
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
psql -c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@@ -45,23 +45,9 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
psql \
-c "DROP DATABASE synapse" \
-c "CREATE DATABASE synapse"
poetry run .ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise
diff -u unported.sql ported.sql | tee schema_diff
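Note that this version of the script uses plain `set -xe` (the `-o pipefail` option is dropped above), so the exit status of the final pipeline is `tee`'s rather than `diff`'s. A minimal sketch of surfacing the diff status explicitly, if that behaviour were wanted:

```shell
# Capture diff's exit status without letting `set -e` abort mid-pipeline.
diff -u unported.sql ported.sql > schema_diff || status=$?
cat schema_diff
exit "${status:-0}"
```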


@@ -8,10 +8,8 @@
!README.rst
!pyproject.toml
!poetry.lock
!Cargo.lock
!build_rust.py
rust/target
synapse/*.so
**/__pycache__


@@ -32,11 +32,9 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
@@ -78,6 +76,7 @@ jobs:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
@@ -94,7 +93,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
toolchain: 1.61.0
override: true
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -112,7 +111,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
toolchain: 1.61.0
override: true
components: rustfmt
- uses: Swatinem/rust-cache@v2
@@ -204,7 +203,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2
@@ -320,7 +319,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2
@@ -362,22 +361,18 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
strategy:
matrix:
include:
@@ -403,27 +398,12 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v3
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
path: |
unported.sql
ported.sql
schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"
@@ -452,7 +432,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2
@@ -478,7 +458,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.58.1
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2

.gitignore (vendored)

@@ -15,9 +15,8 @@ _trial_temp*/
.DS_Store
__pycache__/
# We do want the poetry and cargo lockfile.
# We do want the poetry lockfile.
!poetry.lock
!Cargo.lock
# stuff that is likely to exist when you run a server locally
/*.db


@@ -1,117 +1,3 @@
Synapse 1.68.0 (2022-09-27)
===========================
Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.
In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).
Bugfixes
--------
- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
Synapse 1.68.0rc2 (2022-09-23)
==============================
Bugfixes
--------
- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
Internal Changes
----------------
- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
Synapse 1.68.0rc1 (2022-09-20)
==============================
Features
--------
- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
Bugfixes
--------
- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
- Delete associated data from `event_failed_pull_attempts`, `insertion_events`, `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
Improved Documentation
----------------------
- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
Deprecations and Removals
-------------------------
- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
Internal Changes
----------------
- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
Synapse 1.67.0 (2022-09-13)
===========================
@@ -160,7 +46,7 @@ Bugfixes
- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
- Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
- Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
Updates to the Docker image
@@ -187,7 +73,7 @@ Deprecations and Removals
- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
@@ -231,7 +117,7 @@ was originally planned for Synapse 1.64, but was later deferred until now. See
the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
Deployments with multiple workers should note that the direct TCP replication
configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse
configuration was deprecated in Synapse v1.18.0 and will be removed in Synapse
v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
type (not to be confused with the `replication` resource on the `http` listener
type) and the `worker_replication_port` config option will be removed.
@@ -361,7 +247,7 @@ Bugfixes
--------
- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
@@ -426,7 +312,7 @@ No significant changes since 1.64.0rc2.
Deprecation Warning
-------------------
Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
@@ -435,7 +321,7 @@ If you require your homeserver to verify e-mail addresses or to support password
Synapse 1.64.0rc2 (2022-07-29)
==============================
This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse v1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
Synapse 1.64.0rc1 (2022-07-26)
@@ -684,7 +570,7 @@ Bugfixes
- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
in Synapse 1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
in Synapse v1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
@@ -1277,7 +1163,7 @@ If you have already upgraded to Synapse 1.57.0 without problem, then you have no
Updates to the Docker image
---------------------------
- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse v1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
Synapse 1.57.0 (2022-04-19)
@@ -1529,10 +1415,10 @@ Features
Bugfixes
--------
- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
@@ -1917,15 +1803,15 @@ Bugfixes
- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse v1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
- Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
- Fix a bug introduced in Synapse v1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
Improved Documentation
@@ -2005,8 +1891,8 @@ This release candidate fixes a federation-breaking regression introduced in Syna
Bugfixes
--------
- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
- Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
- Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
Improved Documentation

Cargo.lock (generated file, 324 lines removed)

@@ -1,324 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
dependencies = [
"digest",
]
[[package]]
name = "block-buffer"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "digest"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "generic-array"
version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "indoc"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "libc"
version = "0.2.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
[[package]]
name = "lock_api"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "once_cell"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e"
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-sys",
]
[[package]]
name = "proc-macro2"
version = "1.0.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.16.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0220c44442c9b239dd4357aa856ac468a4f5e1f0df19ddb89b2522952eb4c6ca"
dependencies = [
"cfg-if",
"indoc",
"libc",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.16.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c819d397859445928609d0ec5afc2da5204e0d0f73d6bf9e153b04e83c9cdc2"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.16.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca882703ab55f54702d7bfe1189b41b0af10272389f04cae38fe4cd56c65f75f"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-macros"
version = "0.16.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "568749402955ad7be7bad9a09b8593851cd36e549ac90bfd44079cea500f3f21"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.16.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "611f64e82d98f447787e82b8e7b0ebc681e1eb78fc1252668b2c605ffb4e1eb8"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "quote"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "smallvec"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
"blake2",
"hex",
"pyo3",
]
[[package]]
name = "target-lexicon"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf"
[[package]]
name = "unindent"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"

changelog.d/12595.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13162.misc (new file)

@@ -0,0 +1 @@
Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code.

changelog.d/13480.doc (new file)

@@ -0,0 +1 @@
Note that `libpq` is required on ARM-based Macs.

changelog.d/13506.bugfix (new file)

@@ -0,0 +1 @@
Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).


@@ -0,0 +1 @@
Keep track of when we attempt to backfill an event but fail, so we can intelligently back off in the future.


@@ -0,0 +1 @@
Add admin APIs to fetch messages within a particular window of time.


@@ -0,0 +1 @@
Cancel the processing of key query requests when they time out.


@@ -0,0 +1 @@
Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status).

changelog.d/13703.misc (new file)

@@ -0,0 +1 @@
Add & populate `event_stream_ordering` column on receipts table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/13706.misc (new file)

@@ -0,0 +1 @@
Rename the `EventFormatVersions` enum values so that they line up with room version numbers.

changelog.d/13707.misc (new file)

@@ -0,0 +1 @@
Update trial old deps CI to use poetry 1.2.0.

changelog.d/13714.misc (new file)

@@ -0,0 +1 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.

changelog.d/13717.misc (new file)

@@ -0,0 +1 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.

changelog.d/13718.misc (new file)

@@ -0,0 +1 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.

changelog.d/13724.misc (new file)

@@ -0,0 +1 @@
Fix typechecking with latest types-jsonschema.

changelog.d/13725.misc (new file)

@@ -0,0 +1 @@
Update trial old deps CI to use poetry 1.2.0.

changelog.d/13726.doc (new file)

@@ -0,0 +1 @@
Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22.

changelog.d/13727.doc (new file)

@@ -0,0 +1 @@
Fix a typo in the documentation for the login ratelimiting configuration.

changelog.d/13728.doc (new file)

@@ -0,0 +1 @@
Define Synapse's compatibility policy for SQLite versions.

changelog.d/13729.misc (new file)

@@ -0,0 +1 @@
Strip number suffix from instance name to consolidate services that traces are spread over.

changelog.d/13730.misc (new file)

@@ -0,0 +1 @@
Instrument `get_metadata_for_events` for understandable traces in Jaeger.

changelog.d/13734.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13735.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13738.bugfix (new file)

@@ -0,0 +1 @@
Fix a bug where Synapse fails to start if a signing key file contains an empty line.


@@ -0,0 +1 @@
Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used.

changelog.d/13743.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13745.misc (new file)

@@ -0,0 +1 @@
Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/13746.bugfix (new file)

@@ -0,0 +1 @@
Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases.

changelog.d/13748.misc (new file)

@@ -0,0 +1 @@
Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit.

changelog.d/13749.bugfix (new file)

@@ -0,0 +1 @@
Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver.

changelog.d/13750.misc (new file)

@@ -0,0 +1 @@
Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state.

changelog.d/13752.misc (new file)

@@ -0,0 +1 @@
Use an additional database query when persisting receipts.

changelog.d/13753.misc (new file)

@@ -0,0 +1 @@
Preparatory work for storing thread IDs for notifications and receipts.

changelog.d/13754.misc (new file)

@@ -0,0 +1 @@
Re-type hint some collections as read-only.

changelog.d/13756.misc (new file)

@@ -0,0 +1 @@
Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used.

changelog.d/13759.misc (new file)

@@ -0,0 +1 @@
Add a check for editable installs if the Rust library needs rebuilding.


@@ -0,0 +1 @@
Synapse will now refuse to start if configured to use SQLite < 3.27.

changelog.d/13761.misc (new file)

@@ -0,0 +1 @@
Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance.

changelog.d/13763.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13765.misc (new file)

@@ -0,0 +1 @@
Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/13766.bugfix (new file)

@@ -0,0 +1 @@
Fix a long-standing bug where the `cache_invalidation_stream_seq` sequence would begin at 1 instead of 2.

changelog.d/13769.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13770.misc (new file)

@@ -0,0 +1 @@
Update the script which makes full schema dumps.

changelog.d/13778.misc (new file)

@@ -0,0 +1 @@
Add a stub Rust crate.

changelog.d/13780.misc (new file)

@@ -0,0 +1 @@
Deduplicate `is_server_notices_room`.

changelog.d/13784.misc (new file)

@@ -0,0 +1 @@
Simplify the dependency DAG in the tests workflow.

changelog.d/13788.misc (new file)

@@ -0,0 +1 @@
Remove an old, incorrect migration file.

changelog.d/13789.bugfix (new file)

@@ -0,0 +1 @@
Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests.

changelog.d/13795.misc (new file)

@@ -0,0 +1 @@
Remove unused method in `synapse.api.auth.Auth`.

changelog.d/13798.misc (new file)

@@ -0,0 +1 @@
Fix a memory leak when running the unit tests.

changelog.d/13802.misc (new file)

@@ -0,0 +1 @@
Use partial indices on SQLite.

debian/changelog (vendored)

@@ -1,21 +1,3 @@
matrix-synapse-py3 (1.68.0) stable; urgency=medium
* New Synapse release 1.68.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Sep 2022 12:02:09 +0100
matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium
* New Synapse release 1.68.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 23 Sep 2022 09:40:10 +0100
matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
* New Synapse release 1.68.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Sep 2022 11:18:20 +0100
matrix-synapse-py3 (1.67.0) stable; urgency=medium
* New Synapse release 1.67.0.


@@ -31,9 +31,7 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -78,7 +76,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
# install the OS build deps
RUN \
@@ -139,7 +137,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
### Stage 2: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
FROM docker.io/python:${PYTHON_VERSION}-slim
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'


@@ -17,17 +17,7 @@ ARG SYNAPSE_VERSION=latest
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
# now build the final image, based on the Synapse image.
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
FROM postgres:13-bullseye AS postgres_base
# initialise the database cluster in /var/lib/postgresql
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
@@ -35,6 +25,18 @@ FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
# now build the final image, based on the Synapse image.
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
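As a rough sanity check of the assembled image, one could confirm the copied PostgreSQL binaries run (the image tag and Dockerfile path here are illustrative, not taken from this diff):

```shell
docker build -t complement-synapse-postgres -f docker/complement/Dockerfile .
docker run --rm --entrypoint /usr/lib/postgresql/13/bin/postgres \
    complement-synapse-postgres --version
```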


@@ -1155,41 +1155,3 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
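For example (homeserver URL and admin token are placeholders):

```shell
curl --header "Authorization: Bearer <admin_access_token>" \
    "https://synapse.example.com/_synapse/admin/v1/username_available?username=alice"
```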
### Find a user based on their ID in an auth provider
The API is:
```
GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
```
When a user matches the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
```json
{
"user_id": "@hello:example.org"
}
```
**Parameters**
The following parameters should be set in the URL:
- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method.
- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers.
The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.
**Errors**
Returns a `404` HTTP status code if no user was found, with a response body like this:
```json
{
"errcode":"M_NOT_FOUND",
"error":"User not found"
}
```
_Added in Synapse 1.68.0._
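Putting the URL-encoding advice above into practice, a lookup for an external ID containing `@` and `:` might look like the following sketch (the `oidc` provider ID, homeserver URL and token are placeholders):

```shell
# Percent-encode the external ID before substituting it into the URL.
external_id=$(python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' "@hello:example.org")
curl --header "Authorization: Bearer <admin_access_token>" \
    "https://synapse.example.com/_synapse/admin/v1/auth_providers/oidc/users/${external_id}"
```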


@@ -126,23 +126,6 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
poetry install --extras all --remove-untracked
```
## ...delete everything and start over from scratch?
```shell
# Stop the current virtualenv if active
$ deactivate
# Remove all of the files from the current environment.
# Don't worry, even though it says "all", this will only
# remove the Poetry virtualenvs for the current project.
$ poetry env remove --all
# Reactivate Poetry shell to create the virtualenv again
$ poetry shell
# Install everything again
$ poetry install --extras all
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
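For instance (the test module here is purely illustrative):

```shell
# Run a single unit-test module inside the project's virtualenv.
poetry run trial tests.handlers.test_admin
```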
@@ -273,16 +256,6 @@ from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.
## Remove outdated egg-info
Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
install.
This stores some cached information about dependencies and often conflicts with
letting Poetry do the right thing.
## Try `--verbose` or `--dry-run` arguments.
Sometimes useful to see what poetry's internal logic is.


@@ -45,10 +45,6 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)
Optionally, you can also set
[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
so that the server extracts and re-uses the same request ID format that the
reverse proxy is using.
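A quick way to see the effect, assuming a listener configured with `request_id_header: X-Request-Id` (header name illustrative):

```shell
# The header value should then appear as the request ID in Synapse's logs.
curl --header "X-Request-Id: proxy-req-42" "http://localhost:8008/_matrix/client/versions"
```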
## Reverse-proxy configuration examples


@@ -15,8 +15,9 @@ this document.
The website <https://endoflife.date> also offers convenient
summaries.
- If Synapse was installed using [prebuilt packages](setup/installation.md#prebuilt-packages),
you will need to follow the normal process for upgrading those packages.
- If Synapse was installed using [prebuilt
packages](setup/installation.md#prebuilt-packages), you will need to follow the
normal process for upgrading those packages.
- If Synapse was installed using pip then upgrade to the latest
version by running:
@@ -88,20 +89,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.68.0
As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite
version of 3.27.0 or higher if SQLite is in use, and source checkouts of Synapse
now require a recent Rust compiler.
Installations using
- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
- PyPI wheels via `pip install matrix-synapse` (on supported platforms and architectures)
will not be affected.
# Upgrading to v1.67.0
## Direct TCP replication is no longer supported: migrate to Redis
@@ -138,7 +125,7 @@ From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.
Those using Docker images or Debian packages from Matrix.org will not be
Those using docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
SQLite used by Python with:
@@ -148,7 +135,6 @@ python -c "import sqlite3; print(sqlite3.sqlite_version)"
If this is too old, refer to your distribution for advice on upgrading.
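For example (the output shown is illustrative; anything at or above 3.27.0 is recent enough for the next release):
```shell
$ python -c "import sqlite3; print(sqlite3.sqlite_version)"
3.27.2
```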
# Upgrading to v1.66.0
## Delegation of email validation no longer supported


@@ -12,14 +12,14 @@ See the following for how to decode the dense data available from the default lo
| Part | Explanation |
| ----- | ------------ |
| AAAA | Timestamp request was logged (not received) |
| AAAA | Timestamp request was logged (not recieved) |
| BBBB | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the `listeners` config section, normally the port) |
| CCCC | Line number in code |
| DDDD | Log Level |
| EEEE | Request Identifier (This identifier is shared by related log lines)|
| FFFF | Source IP (Or X-Forwarded-For if enabled) |
| GGGG | Server Port |
| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied).<br/>If this is of the form `@aaa:example.com|@bbb:example.com`, then that means that `@aaa:example.com` is authenticated but they are controlling `@bbb:example.com`, e.g. if `aaa` is controlling `bbb` [via the admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#login-as-a-user). |
| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied) |
| IIII | Total Time to process the request |
| JJJJ | Time to send response over network once generated (this may be negative if the socket is closed before the response is generated)|
| KKKK | Userland CPU time |


@@ -434,16 +434,7 @@ Sub-options for each listener include:
* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.
* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
behind a [reverse-proxy](../../reverse_proxy.md).
* `request_id_header`: The header extracted from each incoming request that is
used as the basis for the request ID. The request ID is used in
[logs](../administration/request_log.md#request-log-format) and tracing to
correlate and match up requests. When unset, Synapse will automatically
generate sequential request IDs. This option is useful when Synapse is behind
a [reverse-proxy](../../reverse_proxy.md).
_Added in Synapse 1.68.0._
behind a reverse-proxy.
* `resources`: Only valid for an 'http' listener. A list of resources to host
on this port. Sub-options for each resource are:


@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.68.0"
version = "1.67.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -86,9 +86,8 @@ include = [
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
{ path = "Cargo.toml", format = "sdist" },
{ path = "Cargo.lock", format = "sdist" },
{ path = "rust/Cargo.toml", format = "sdist" },
{ path = "rust/build.rs", format = "sdist" },
{ path = "rust/Cargo.lock", format = "sdist" },
{ path = "rust/src/**", format = "sdist" },
]
exclude = [


@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"
edition = "2021"
rust-version = "1.58.1"
rust-version = "1.61.0"
[lib]
name = "synapse"


@@ -427,12 +427,11 @@ def _publish(gh_token: str) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
def upload(gh_token: Optional[str]) -> None:
_upload(gh_token)
def upload() -> None:
_upload()
def _upload(gh_token: Optional[str]) -> None:
def _upload() -> None:
"""Upload release to pypi."""
current_version = get_package_version()
@@ -445,40 +444,18 @@ def _upload(gh_token: Optional[str]) -> None:
click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
click.get_current_context().abort()
# Query all the assets corresponding to this release.
gh = Github(gh_token)
gh_repo = gh.get_repo("matrix-org/synapse")
gh_release = gh_repo.get_release(tag_name)
all_assets = set(gh_release.get_assets())
# Only accept the wheels and sdist.
# Notably: we don't care about debs.tar.xz.
asset_names_and_urls = sorted(
(asset.name, asset.browser_download_url)
for asset in all_assets
if asset.name.endswith((".whl", ".tar.gz"))
)
# Print out what we've determined.
print("Found relevant assets:")
for asset_name, _ in asset_names_and_urls:
print(f" - {asset_name}")
ignored_asset_names = sorted(
{asset.name for asset in all_assets}
- {asset_name for asset_name, _ in asset_names_and_urls}
)
print("\nIgnoring irrelevant assets:")
for asset_name in ignored_asset_names:
print(f" - {asset_name}")
pypi_asset_names = [
f"matrix_synapse-{current_version}-py3-none-any.whl",
f"matrix-synapse-{current_version}.tar.gz",
]
with TemporaryDirectory(prefix=f"synapse_upload_{tag_name}_") as tmpdir:
for name, asset_download_url in asset_names_and_urls:
for name in pypi_asset_names:
filename = path.join(tmpdir, name)
url = f"https://github.com/matrix-org/synapse/releases/download/{tag_name}/{name}"
click.echo(f"Downloading {name} into {filename}")
urllib.request.urlretrieve(asset_download_url, filename=filename)
urllib.request.urlretrieve(url, filename=filename)
if click.confirm("Upload to PyPI?", default=True):
subprocess.run("twine upload *", shell=True, cwd=tmpdir)
@@ -695,7 +672,7 @@ def full(gh_token: str) -> None:
_publish(gh_token)
click.echo("\n*** upload ***")
_upload(gh_token)
_upload()
click.echo("\n*** merge back ***")
_merge_back()


@@ -206,7 +206,6 @@ class HttpListenerConfig:
resources: List[HttpResourceConfig] = attr.Factory(list)
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
request_id_header: Optional[str] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -521,11 +520,9 @@ class ServerConfig(Config):
):
raise ConfigError("allowed_avatar_mimetypes must be a list")
listeners = config.get("listeners", [])
if not isinstance(listeners, list):
raise ConfigError("Expected a list", ("listeners",))
self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
self.listeners = [
parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", []))
]
# no_tls is not really supported any more, but let's grandfather it in
# here.
@@ -892,9 +889,6 @@ def read_gc_thresholds(
def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
"""parse a listener config from the config file"""
if not isinstance(listener, dict):
raise ConfigError("Expected a dictionary", ("listeners", str(num)))
listener_type = listener["type"]
# Raise a helpful error if direct TCP replication is still configured.
if listener_type == "replication":
@@ -934,7 +928,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
resources=resources,
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
request_id_header=listener.get("request_id_header"),
)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)


@@ -167,6 +167,7 @@ class EventBuilder:
"content": self.content,
"unsigned": self.unsigned,
"depth": depth,
"prev_state": [],
}
if self.is_state():


@@ -906,6 +906,9 @@ class FederationClient(FederationBase):
# The protoevent received over the JSON wire may not have all
# the required fields. Let's just gloss over that because
# there are some we never care about
if "prev_state" not in pdu_dict:
pdu_dict["prev_state"] = []
ev = builder.create_local_event_from_event_dict(
self._clock,
self.hostname,


@@ -188,21 +188,18 @@ class E2eKeysHandler:
)
invalid_cached_users = cached_users - valid_cached_users
if invalid_cached_users:
# Fix up results. If we get here, it means there was either a bug in
# device list tracking, or we hit the race mentioned above.
# TODO: In practice, this path is hit fairly often in existing
# deployments when clients query the keys of departed remote
# users. A background update to mark the appropriate device
# lists as unsubscribed is needed.
# https://github.com/matrix-org/synapse/issues/13651
# Note that this currently introduces a failure mode when clients
# are trying to decrypt old messages from a remote user whose
# homeserver is no longer available. We may want to consider falling
# back to the cached data when we fail to retrieve a device list
# over federation for such remote users.
# Fix up results. If we get here, there is either a bug in device
# list tracking, or we hit the race mentioned above.
user_ids_not_in_cache.update(invalid_cached_users)
for invalid_user_id in invalid_cached_users:
remote_results.pop(invalid_user_id)
# This log message may be removed if it turns out it's almost
# entirely triggered by races.
logger.error(
"Devices for %s were cached, but the server no longer shares "
"any rooms with them. The cached device lists are stale.",
invalid_cached_users,
)
for user_id, devices in remote_results.items():
user_devices = results.setdefault(user_id, {})


@@ -866,11 +866,6 @@ class FederationEventHandler:
event.room_id, event_id, str(err)
)
return
except Exception as exc:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(exc)
)
raise exc
try:
try:
@@ -913,11 +908,6 @@ class FederationEventHandler:
logger.warning("Pulled event %s failed history check.", event_id)
else:
raise
except Exception as exc:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(exc)
)
raise exc
@trace
async def _compute_event_context_with_maybe_missing_prevs(


@@ -72,12 +72,10 @@ class SynapseRequest(Request):
site: "SynapseSite",
*args: Any,
max_request_body_size: int = 1024,
request_id_header: Optional[str] = None,
**kw: Any,
):
super().__init__(channel, *args, **kw)
self._max_request_body_size = max_request_body_size
self.request_id_header = request_id_header
self.synapse_site = site
self.reactor = site.reactor
self._channel = channel # this is used by the tests
@@ -174,14 +172,7 @@ class SynapseRequest(Request):
self._opentracing_span = span
def get_request_id(self) -> str:
request_id_value = None
if self.request_id_header:
request_id_value = self.getHeader(self.request_id_header)
if request_id_value is None:
request_id_value = str(self.request_seq)
return "%s-%s" % (self.get_method(), request_id_value)
return "%s-%i" % (self.get_method(), self.request_seq)
def get_redacted_uri(self) -> str:
"""Gets the redacted URI associated with the request (or placeholder if the URI
@@ -620,15 +611,12 @@ class SynapseSite(Site):
proxied = config.http_options.x_forwarded
request_class = XForwardedForRequest if proxied else SynapseRequest
request_id_header = config.http_options.request_id_header
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
channel,
self,
max_request_body_size=max_request_body_size,
queued=queued,
request_id_header=request_id_header,
)
self.requestFactory = request_factory # type: ignore


@@ -80,7 +80,6 @@ from synapse.rest.admin.users import (
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
UserByExternalId,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
@@ -276,7 +275,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ListDestinationsRestServlet(hs).register(http_server)
RoomMessagesRestServlet(hs).register(http_server)
RoomTimestampToEventRestServlet(hs).register(http_server)
UserByExternalId(hs).register(http_server)
# Some servlets only get registered for the main process.
if hs.config.worker.worker_app is None:


@@ -1156,30 +1156,3 @@ class AccountDataRestServlet(RestServlet):
"rooms": by_room_data,
},
}
class UserByExternalId(RestServlet):
"""Find a user based on an external ID from an auth provider"""
PATTERNS = admin_patterns(
"/auth_providers/(?P<provider>[^/]*)/users/(?P<external_id>[^/]*)"
)
def __init__(self, hs: "HomeServer"):
self._auth = hs.get_auth()
self._store = hs.get_datastores().main
async def on_GET(
self,
request: SynapseRequest,
provider: str,
external_id: str,
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self._auth, request)
user_id = await self._store.get_user_by_external_id(provider, external_id)
if user_id is None:
raise NotFoundError("User not found")
return HTTPStatus.OK, {"user_id": user_id}


@@ -19,7 +19,6 @@ from typing import TYPE_CHECKING, List, Optional, Tuple
from urllib.parse import urlparse
from pydantic import StrictBool, StrictStr, constr
from typing_extensions import Literal
from twisted.web.server import Request
@@ -44,7 +43,6 @@ from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.rest.client.models import (
AuthenticationData,
ClientSecretStr,
EmailRequestTokenBody,
MsisdnRequestTokenBody,
)
@@ -629,11 +627,6 @@ class ThreepidAddRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
class PostBody(RequestBodyModel):
auth: Optional[AuthenticationData] = None
client_secret: ClientSecretStr
sid: StrictStr
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
@@ -643,17 +636,22 @@ class ThreepidAddRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
body = parse_and_validate_json_object_from_request(request, self.PostBody)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["client_secret", "sid"])
sid = body["sid"]
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body.dict(exclude_unset=True),
body,
"add a third-party identifier to your account",
)
validation_session = await self.identity_handler.validate_threepid_session(
body.client_secret, body.sid
client_secret, sid
)
if validation_session:
await self.auth_handler.add_threepid(
@@ -678,20 +676,23 @@ class ThreepidBindRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
class PostBody(RequestBodyModel):
client_secret: ClientSecretStr
id_access_token: StrictStr
id_server: StrictStr
sid: StrictStr
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_and_validate_json_object_from_request(request, self.PostBody)
body = parse_json_object_from_request(request)
assert_params_in_dict(
body, ["id_server", "sid", "id_access_token", "client_secret"]
)
id_server = body["id_server"]
sid = body["sid"]
id_access_token = body["id_access_token"]
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
await self.identity_handler.bind_threepid(
body.client_secret, body.sid, user_id, body.id_server, body.id_access_token
client_secret, sid, user_id, id_server, id_access_token
)
return 200, {}
@@ -707,27 +708,23 @@ class ThreepidUnbindRestServlet(RestServlet):
self.auth = hs.get_auth()
self.datastore = self.hs.get_datastores().main
class PostBody(RequestBodyModel):
address: StrictStr
id_server: Optional[StrictStr] = None
medium: Literal["email", "msisdn"]
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Unbind the given 3pid from a specific identity server, or identity servers that are
known to have this 3pid bound
"""
requester = await self.auth.get_user_by_req(request)
body = parse_and_validate_json_object_from_request(request, self.PostBody)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
medium = body.get("medium")
address = body.get("address")
id_server = body.get("id_server")
# Attempt to unbind the threepid from an identity server. If id_server is None, try to
# unbind from all identity servers this threepid has been added to in the past
result = await self.identity_handler.try_unbind_threepid(
requester.user.to_string(),
{
"address": body.address,
"medium": body.medium,
"id_server": body.id_server,
},
{"address": address, "medium": medium, "id_server": id_server},
)
return 200, {"id_server_unbind_result": "success" if result else "no-support"}
@@ -741,25 +738,21 @@ class ThreepidDeleteRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
class PostBody(RequestBodyModel):
address: StrictStr
id_server: Optional[StrictStr] = None
medium: Literal["email", "msisdn"]
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
body = parse_and_validate_json_object_from_request(request, self.PostBody)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
try:
ret = await self.auth_handler.delete_threepid(
user_id, body.medium, body.address, body.id_server
user_id, body["medium"], body["address"], body.get("id_server")
)
except Exception:
# NB. This endpoint should succeed if there is nothing to


@@ -36,20 +36,18 @@ class AuthenticationData(RequestBodyModel):
type: Optional[StrictStr] = None
if TYPE_CHECKING:
ClientSecretStr = StrictStr
else:
# See also assert_valid_client_secret()
ClientSecretStr = constr(
regex="[0-9a-zA-Z.=_-]", # noqa: F722
min_length=1,
max_length=255,
strict=True,
)
class ThreePidRequestTokenBody(RequestBodyModel):
if TYPE_CHECKING:
client_secret: StrictStr
else:
# See also assert_valid_client_secret()
client_secret: constr(
regex="[0-9a-zA-Z.=_-]", # noqa: F722
min_length=0,
max_length=255,
strict=True,
)
class ThreepidRequestTokenBody(RequestBodyModel):
client_secret: ClientSecretStr
id_server: Optional[StrictStr]
id_access_token: Optional[StrictStr]
next_link: Optional[StrictStr]
@@ -64,7 +62,7 @@ class ThreepidRequestTokenBody(RequestBodyModel):
return token
class EmailRequestTokenBody(ThreepidRequestTokenBody):
class EmailRequestTokenBody(ThreePidRequestTokenBody):
email: StrictStr
# Canonicalise the email address. The addresses are all stored canonicalised
@@ -82,6 +80,6 @@ else:
ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)
class MsisdnRequestTokenBody(ThreepidRequestTokenBody):
class MsisdnRequestTokenBody(ThreePidRequestTokenBody):
country: ISO3116_1_Alpha_2
phone_number: StrictStr


@@ -577,21 +577,6 @@ async def _iterative_auth_checks(
if ev.rejected_reason is None:
auth_events[key] = event_map[ev_id]
if event.rejected_reason is not None:
# Do not admit previously rejected events into state.
# TODO: This isn't spec compliant. Events that were previously rejected due
# to failing auth checks at their state, but pass auth checks during
# state resolution should be accepted. Synapse does not handle the
# change of rejection status well, so we preserve the previous
# rejection status for now.
#
# Note that events rejected for non-state reasons, such as having the
# wrong auth events, should remain rejected.
#
# https://spec.matrix.org/v1.2/rooms/v9/#rejected-events
# https://github.com/matrix-org/synapse/issues/13797
continue
try:
event_auth.check_state_dependent_auth_rules(
event,


@@ -533,7 +533,6 @@ class BackgroundUpdater:
index_name: name of index to add
table: table to add index to
columns: columns/expressions to include in index
where_clause: A WHERE clause to specify a partial unique index.
unique: true to make a UNIQUE index
psql_only: true to only create this index on psql databases (useful
for virtual sqlite tables)


@@ -1191,7 +1191,6 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
"""
@@ -1204,7 +1203,6 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
where_clause: An index predicate to apply to the upsert.
lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
Returns:
@@ -1215,12 +1213,7 @@ class DatabasePool:
if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_txn_native_upsert(
txn,
table,
keyvalues,
values,
insertion_values=insertion_values,
where_clause=where_clause,
txn, table, keyvalues, values, insertion_values=insertion_values
)
else:
return self.simple_upsert_txn_emulated(
@@ -1229,7 +1222,6 @@ class DatabasePool:
keyvalues,
values,
insertion_values=insertion_values,
where_clause=where_clause,
lock=lock,
)
@@ -1240,7 +1232,6 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
"""
@@ -1249,7 +1240,6 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
where_clause: An index predicate to apply to the upsert.
lock: True to lock the table when doing the upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
@@ -1269,17 +1259,14 @@ class DatabasePool:
else:
return "%s = ?" % (key,)
# Generate a where clause of each keyvalue and optionally the provided
# index predicate.
where = [_getwhere(k) for k in keyvalues]
if where_clause:
where.append(where_clause)
if not values:
# If `values` is empty, then all of the values we care about are in
# the unique key, so there is nothing to UPDATE. We can just do a
# SELECT instead to see if it exists.
sql = "SELECT 1 FROM %s WHERE %s" % (table, " AND ".join(where))
sql = "SELECT 1 FROM %s WHERE %s" % (
table,
" AND ".join(_getwhere(k) for k in keyvalues),
)
sqlargs = list(keyvalues.values())
txn.execute(sql, sqlargs)
if txn.fetchall():
@@ -1290,7 +1277,7 @@ class DatabasePool:
sql = "UPDATE %s SET %s WHERE %s" % (
table,
", ".join("%s = ?" % (k,) for k in values),
" AND ".join(where),
" AND ".join(_getwhere(k) for k in keyvalues),
)
sqlargs = list(values.values()) + list(keyvalues.values())
@@ -1320,7 +1307,6 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
where_clause: Optional[str] = None,
) -> bool:
"""
Use the native UPSERT functionality in PostgreSQL.
@@ -1330,7 +1316,6 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
where_clause: An index predicate to apply to the upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
@@ -1346,12 +1331,11 @@ class DatabasePool:
allvalues.update(values)
latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % (
sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
table,
", ".join(k for k in allvalues),
", ".join("?" for _ in allvalues),
", ".join(k for k in keyvalues),
f"WHERE {where_clause}" if where_clause else "",
latter,
)
txn.execute(sql, list(allvalues.values()))
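For context, the removed `where_clause` let callers target a partial unique index; a sketch of the two statement shapes the template above produces, with illustrative table and column names:
```sql
-- Without a where_clause (the behaviour that remains):
INSERT INTO example_table (key_col, value_col) VALUES (?, ?)
    ON CONFLICT (key_col) DO UPDATE SET value_col = EXCLUDED.value_col;

-- With the removed where_clause, targeting a partial unique index:
INSERT INTO example_table (key_col, value_col) VALUES (?, ?)
    ON CONFLICT (key_col) WHERE value_col IS NOT NULL
    DO UPDATE SET value_col = EXCLUDED.value_col;
```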


@@ -194,7 +194,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
# changed its content in the database. We can't call
# self._invalidate_cache_and_stream because self.get_event_cache isn't of the
# right type.
self.invalidate_get_event_cache_after_txn(txn, event.event_id)
self.invalidate_get_event_cache_by_event_id_after_txn(txn, event.event_id)
# Send that invalidation to replication so that other workers also invalidate
# the event cache.
self._send_invalidation_to_replication(


@@ -1294,8 +1294,10 @@ class PersistEventsStore:
"""
depth_updates: Dict[str, int] = {}
for event, context in events_and_contexts:
# Remove the any existing cache entries for the event_ids
self.store.invalidate_get_event_cache_after_txn(txn, event.event_id)
# Remove any existing cache entries for the event_ids
self.store.invalidate_get_event_cache_by_event_id_after_txn(
txn, event.event_id
)
# Then update the `stream_ordering` position to mark the latest
# event as the front of the room. This should not be done for
# backfilled events because backfilled events have negative
@@ -1703,7 +1705,7 @@ class PersistEventsStore:
_invalidate_caches_for_event.
"""
assert event.redacts is not None
self.store.invalidate_get_event_cache_after_txn(txn, event.redacts)
self.store.invalidate_get_event_cache_by_event_id_after_txn(txn, event.redacts)
txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))


@@ -80,6 +80,7 @@ from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.dual_lookup_cache import DualLookupCache
from synapse.util.caches.lrucache import AsyncLruCache
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
@@ -245,6 +246,8 @@ class EventsWorkerStore(SQLBaseStore):
] = AsyncLruCache(
cache_name="*getEvent*",
max_size=hs.config.caches.event_cache_size,
cache_type=DualLookupCache,
dual_lookup_secondary_key_function=lambda v: (v.event.room_id,),
)
# Map from event ID to a deferred that will result in a map from event
@@ -733,7 +736,7 @@ class EventsWorkerStore(SQLBaseStore):
return event_entry_map
def invalidate_get_event_cache_after_txn(
def invalidate_get_event_cache_by_event_id_after_txn(
self, txn: LoggingTransaction, event_id: str
) -> None:
"""
@@ -747,10 +750,31 @@ class EventsWorkerStore(SQLBaseStore):
event_id: the event ID to be invalidated from caches
"""
txn.async_call_after(self._invalidate_async_get_event_cache, event_id)
txn.call_after(self._invalidate_local_get_event_cache, event_id)
txn.async_call_after(
self._invalidate_async_get_event_cache_by_event_id, event_id
)
txn.call_after(self._invalidate_local_get_event_cache_by_event_id, event_id)
async def _invalidate_async_get_event_cache(self, event_id: str) -> None:
def invalidate_get_event_cache_by_room_id_after_txn(
self, txn: LoggingTransaction, room_id: str
) -> None:
"""
Prepares a database transaction to invalidate the get event cache for a given
room ID when executed successfully. This is achieved by attaching two callbacks
to the transaction, one to invalidate the async cache and one for the in memory
sync cache (importantly called in that order).
Arguments:
txn: the database transaction to attach the callbacks to.
room_id: the room ID to invalidate all associated event caches for.
"""
txn.async_call_after(self._invalidate_async_get_event_cache_by_room_id, room_id)
txn.call_after(self._invalidate_local_get_event_cache_by_room_id, room_id)
async def _invalidate_async_get_event_cache_by_event_id(
self, event_id: str
) -> None:
"""
Invalidates an event in the asynchronous get event cache, which may be remote.
@@ -760,7 +784,18 @@ class EventsWorkerStore(SQLBaseStore):
await self._get_event_cache.invalidate((event_id,))
def _invalidate_local_get_event_cache(self, event_id: str) -> None:
async def _invalidate_async_get_event_cache_by_room_id(self, room_id: str) -> None:
"""
Invalidates all events associated with a given room in the asynchronous get event
cache, which may be remote.
Arguments:
room_id: the room ID to invalidate associated events of.
"""
await self._get_event_cache.invalidate((room_id,))
def _invalidate_local_get_event_cache_by_event_id(self, event_id: str) -> None:
"""
Invalidates an event in local in-memory get event caches.
@@ -772,6 +807,18 @@ class EventsWorkerStore(SQLBaseStore):
self._event_ref.pop(event_id, None)
self._current_event_fetches.pop(event_id, None)
def _invalidate_local_get_event_cache_by_room_id(self, room_id: str) -> None:
"""
Invalidates all events associated with a given room ID in local in-memory
get event caches.
Arguments:
room_id: the room ID to invalidate events of.
"""
self._get_event_cache.invalidate_local((room_id,))
# TODO: invalidate _event_ref and _current_event_fetches. How?
async def _get_events_from_cache(
self, events: Iterable[str], update_metrics: bool = True
) -> Dict[str, EventCacheEntry]:
@@ -2284,7 +2331,7 @@ class EventsWorkerStore(SQLBaseStore):
updatevalues={"rejection_reason": rejection_reason},
)
self.invalidate_get_event_cache_after_txn(txn, event_id)
self.invalidate_get_event_cache_by_event_id_after_txn(txn, event_id)
# TODO(faster_joins): invalidate the cache on workers. Ideally we'd just
# call '_send_invalidation_to_replication', but we actually need the other


@@ -304,7 +304,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
self._invalidate_cache_and_stream(
txn, self.have_seen_event, (room_id, event_id)
)
self.invalidate_get_event_cache_after_txn(txn, event_id)
self.invalidate_get_event_cache_by_event_id_after_txn(txn, event_id)
logger.info("[purge] done")
@@ -419,7 +419,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"event_forward_extremities",
"event_push_actions",
"event_search",
"event_failed_pull_attempts",
"partial_state_events",
"events",
"federation_inbound_events_staging",
@@ -442,10 +441,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"e2e_room_keys",
"event_push_summary",
"pusher_throttle",
"insertion_events",
"insertion_event_extremities",
"insertion_event_edges",
"batch_events",
"room_account_data",
"room_tags",
# "rooms" happens last, to keep the foreign keys in the other tables
@@ -483,6 +478,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# XXX: as with purge_history, this is racy, but no worse than other races
# that already exist.
self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))
self._invalidate_local_get_event_cache_by_room_id(room_id)
logger.info("[purge] done")


@@ -83,8 +83,6 @@ Changes in SCHEMA_VERSION = 73;
event_push_summary, receipts_linearized, and receipts_graph.
- Add table `event_failed_pull_attempts` to keep track when we fail to pull
events over federation.
- Add indexes to various tables (`event_failed_pull_attempts`, `insertion_events`,
`batch_events`) to make it easy to delete all associated rows when purging a room.
"""


@@ -1,22 +0,0 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- Add index so we can easily purge all rows from a given `room_id`
CREATE INDEX IF NOT EXISTS event_failed_pull_attempts_room_id ON event_failed_pull_attempts(room_id);
-- MSC2716 related tables:
-- Add indexes so we can easily purge all rows from a given `room_id`
CREATE INDEX IF NOT EXISTS insertion_events_room_id ON insertion_events(room_id);
CREATE INDEX IF NOT EXISTS batch_events_room_id ON batch_events(room_id);


@@ -0,0 +1,238 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
Callable,
Dict,
Generic,
ItemsView,
List,
Optional,
TypeVar,
Union,
ValuesView,
)
# Used to distinguish between a value not existing in a map and the value being 'None'.
SENTINEL = object()
# The type of the primary dict's keys.
PKT = TypeVar("PKT")
# The type of the primary dict's values.
PVT = TypeVar("PVT")
# The type of the secondary dict's keys.
SKT = TypeVar("SKT")
logger = logging.getLogger(__name__)
class SecondarySet(set):
"""
Used to differentiate between an entry in the secondary_dict, and a set stored
in the primary_dict. This is necessary as pop() can return either.
"""
class DualLookupCache(Generic[PKT, PVT, SKT]):
"""
A backing store for LruCache that supports multiple entry points.
Allows subsets of data to be deleted efficiently without requiring extra
information to query.
The data structure is two dictionaries:
* primary_dict containing a mapping of primary_key -> value.
* secondary_dict containing a mapping of secondary_key -> set of primary_key.
On insert, a mapping is created in the primary_dict, and a mapping from the
derived secondary_key to (a set containing) the same primary_key is made in
the secondary_dict. The secondary_key must be derived from the inserted value
via a lambda function provided at cache initialisation. This is so invalidated
entries in the primary_dict may automatically invalidate those in the
secondary_dict. A secondary_key may be associated with one or more primary_keys.
This creates an interface which allows for efficient lookups of a value given
a primary_key, as well as efficient invalidation of a subset of mappings in the
primary_dict given a secondary_key. A primary_key may not be associated with
more than one secondary_key.
As a worked example, consider storing a cache of room events. We could configure
the cache to store mappings between EventIDs and EventBase in the primary_dict,
while storing a mapping between room IDs and event IDs as the secondary_dict:
primary_dict: EventID -> EventBase
secondary_dict: RoomID -> {EventID, EventID, ...}
This would be efficient for the following operations:
* Given an EventID, look up the associated EventBase, and thus the RoomID.
* Given a RoomID, invalidate all primary_dict entries for events in that room.
Since this is intended as a backing store for LruCache, when it comes time to evict
an entry from the primary_dict (EventID -> EventBase), the secondary_key can be
derived from the provided lambda function:
secondary_key = lambda event_base: event_base.room_id
The EventID set under room_id would then have the appropriate EventID entry evicted.
"""
def __init__(self, secondary_key_function: Callable[[PVT], SKT]) -> None:
self._primary_dict: Dict[PKT, PVT] = {}
self._secondary_dict: Dict[SKT, SecondarySet] = {}
self._secondary_key_function = secondary_key_function
def __setitem__(self, key: PKT, value: PVT) -> None:
self.set(key, value)
def __contains__(self, key: PKT) -> bool:
return key in self._primary_dict
def set(self, key: PKT, value: PVT) -> None:
"""Add an entry to the cache.
Will add an entry to the primary_dict consisting of key->value, as well as append
to the set referred to by secondary_key_function(value) in the secondary_dict.
Args:
key: The key for a new mapping in primary_dict.
value: The value for a new mapping in primary_dict.
"""
# Create an entry in the primary_dict.
self._primary_dict[key] = value
# Derive the secondary_key to use from the given primary_value.
secondary_key = self._secondary_key_function(value)
# TODO: If the lambda function resolves to None, don't insert an entry?
# And create a mapping in the secondary_dict to a set containing the
# primary_key, creating the set if necessary.
secondary_key_set = self._secondary_dict.setdefault(
secondary_key, SecondarySet()
)
secondary_key_set.add(key)
logger.info("*** Insert into primary_dict: %s: %s", key, value)
logger.info("*** Insert into secondary_dict: %s: %s", secondary_key, key)
def get(self, key: PKT, default: Optional[PVT] = None) -> Optional[PVT]:
"""Retrieve a value from the cache if it exists. If not, return the default
value.
This method simply pulls entries from the primary_dict.
# TODO: Any use cases for externally getting entries from the secondary_dict?
Args:
key: The key to search the cache for.
default: The default value to return if the given key is not found.
Returns:
The value referenced by the given key, if it exists in the cache. If not,
the value of `default` will be returned.
"""
logger.info("*** Retrieving key from primary_dict: %s", key)
return self._primary_dict.get(key, default)
def clear(self) -> None:
"""Evicts all entries from the cache."""
self._primary_dict.clear()
self._secondary_dict.clear()
def pop(
    self, key: Union[PKT, SKT], default: Optional[Union[List[PVT], PVT]] = None
) -> Optional[Union[List[PVT], PVT]]:
"""Remove an entry from either the primary_dict or secondary_dict.
The primary_dict is checked first for the key. If an entry is found, it is
removed from the primary_dict and returned.
If no entry in the primary_dict exists, then the secondary_dict is checked.
If an entry exists there, all associated entries in the primary_dict will be
deleted, and their values returned from this function as a list.
Args:
key: A key to drop from either the primary_dict or secondary_dict.
default: The default value if the key does not exist in either dict.
Returns:
Either the matched value from the primary_dict, or a list of the values
removed via the secondary_dict. If no value is found for the key, the
default is returned.
"""
# Attempt to remove from the primary_dict first.
primary_value = self._primary_dict.pop(key, SENTINEL)
if primary_value is not SENTINEL:
# We found a value in the primary_dict. Remove it from the corresponding
# entry in the secondary_dict, and then return it.
logger.info(
"*** Popped entry from primary_dict: %s: %s", key, primary_value
)
# Derive the secondary_key from the primary_value
secondary_key = self._secondary_key_function(primary_value)
# Pop the entry from the secondary_dict
secondary_key_set = self._secondary_dict[secondary_key]
if len(secondary_key_set) > 1:
# Delete just the set entry for the given key.
secondary_key_set.remove(key)
logger.info(
"*** Popping from secondary_dict: %s: %s", secondary_key, key
)
else:
# Delete the entire set referenced by the secondary_key, as it only
# has one entry.
del self._secondary_dict[secondary_key]
logger.info("*** Popping from secondary_dict: %s", secondary_key)
return primary_value
# There was no matching value in the primary_dict. Attempt the secondary_dict.
primary_key_set = self._secondary_dict.pop(key, SENTINEL)
if primary_key_set is not SENTINEL:
# We found a set in the secondary_dict.
logger.info(
"*** Found '%s' in secondary_dict: %s: ",
key,
primary_key_set,
)
popped_primary_dict_values: List[PVT] = []
# We found an entry in the secondary_dict. Delete all related entries in the
# primary_dict.
logger.info(
"*** Found key in secondary_dict to pop: %s. "
"Popping primary_dict entries",
key,
)
for primary_key in primary_key_set:
primary_value = self._primary_dict.pop(primary_key)
logger.info("*** Popping entry from primary_dict: %s - %s", primary_key, primary_value)
logger.info("*** primary_dict: %s", self._primary_dict)
popped_primary_dict_values.append(primary_value)
# Return the values that were popped from the primary_dict.
return popped_primary_dict_values
# No match in either dict.
return default
def values(self) -> ValuesView:
return self._primary_dict.values()
def items(self) -> ItemsView:
return self._primary_dict.items()
def __len__(self) -> int:
return len(self._primary_dict)
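A minimal usage sketch of the class above, reusing the event/room shape from the worked example (values are plain tuples for brevity):
```python
# Primary key: event ID. The secondary key (room ID) is derived from the value.
cache: DualLookupCache = DualLookupCache(secondary_key_function=lambda v: v[1])

cache["$event1"] = ("event one", "!room:example.org")
cache["$event2"] = ("event two", "!room:example.org")

assert cache.get("$event1") == ("event one", "!room:example.org")

# Popping by the secondary key evicts every event in the room and returns the
# popped values (iteration order of the underlying set is not defined).
popped = cache.pop("!room:example.org")
assert sorted(popped) == [
    ("event one", "!room:example.org"),
    ("event two", "!room:example.org"),
]
assert len(cache) == 0
```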


@@ -46,8 +46,10 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces
from synapse.metrics.jemalloc import get_jemalloc_stats
from synapse.util import Clock, caches
from synapse.util.caches import CacheMetric, EvictionReason, register_cache
from synapse.util.caches.dual_lookup_cache import DualLookupCache, SecondarySet
from synapse.util.caches.treecache import (
TreeCache,
TreeCacheNode,
iterate_tree_cache_entry,
iterate_tree_cache_items,
)
@@ -375,12 +377,13 @@ class LruCache(Generic[KT, VT]):
self,
max_size: int,
cache_name: Optional[str] = None,
cache_type: Type[Union[dict, TreeCache]] = dict,
cache_type: Type[Union[dict, TreeCache, DualLookupCache]] = dict,
size_callback: Optional[Callable[[VT], int]] = None,
metrics_collection_callback: Optional[Callable[[], None]] = None,
apply_cache_factor_from_config: bool = True,
clock: Optional[Clock] = None,
prune_unread_entries: bool = True,
dual_lookup_secondary_key_function: Optional[Callable[[Any], Any]] = None,
):
"""
Args:
@@ -411,6 +414,10 @@ class LruCache(Generic[KT, VT]):
prune_unread_entries: If True, cache entries that haven't been read recently
will be evicted from the cache in the background. Set to False to
opt-out of this behaviour.
# TODO: At this point we should probably just pass an initialised cache type
# to LruCache, no?
dual_lookup_secondary_key_function: A function that derives the secondary key
    from a cached value; required when `cache_type` is `DualLookupCache`.
"""
# Default `clock` to something sensible. Note that we rename it to
# `real_clock` so that mypy doesn't think its still `Optional`.
@@ -419,7 +426,30 @@ class LruCache(Generic[KT, VT]):
else:
real_clock = clock
cache: Union[Dict[KT, _Node[KT, VT]], TreeCache] = cache_type()
# TODO: I've had to make this ugly to appease mypy :(
# Perhaps initialise the backing cache and then pass to LruCache?
cache: Union[Dict[KT, _Node[KT, VT]], TreeCache, DualLookupCache]
if cache_type is DualLookupCache:
# The dual_lookup_secondary_key_function is a function that's intended to
# extract a key from the value in the cache. Since we wrap values given to
# us in a _Node object, this function will actually operate on a _Node,
# instead of directly on the object type callers are expecting.
#
# Thus, we wrap the function given by the caller in another one that
# extracts the value from the _Node, before then handing it off to the
# given function for processing.
def key_function_wrapper(node: Any) -> Any:
assert dual_lookup_secondary_key_function is not None
return dual_lookup_secondary_key_function(node.value)
cache = DualLookupCache(
secondary_key_function=key_function_wrapper,
)
elif cache_type is TreeCache:
cache = TreeCache()
else:
cache = {}
self.cache = cache # Used for introspection.
self.apply_cache_factor_from_config = apply_cache_factor_from_config
@@ -722,13 +752,25 @@ class LruCache(Generic[KT, VT]):
may be of lower cardinality than the TreeCache - in which case the whole
subtree is deleted.
"""
popped = cache.pop(key, None)
if popped is None:
# Remove an entry from the cache.
# In the case of a 'dict' cache type, we're just removing an entry from the
# dict. For a TreeCache, we're removing a subtree which has children.
popped_entry: _Node[KT, VT] = cache.pop(key, None)
if popped_entry is None:
return
# for each deleted node, we now need to remove it from the linked list
# and run its callbacks.
for leaf in iterate_tree_cache_entry(popped):
delete_node(leaf)
if isinstance(popped_entry, TreeCacheNode):
# We've popped a subtree from a TreeCache - now we need to clean up
# each child node.
for leaf in iterate_tree_cache_entry(popped_entry):
# For each deleted child node, we remove it from the linked list and
# run its callbacks.
delete_node(leaf)
elif isinstance(popped_entry, SecondarySet):
for leaf in popped_entry:
delete_node(leaf)
else:
delete_node(popped_entry)
@synchronized
def cache_clear() -> None:
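For reference, this is roughly how the `EventsWorkerStore` hunk earlier wires the new backing store in (a sketch; the real call site uses `AsyncLruCache` and a config-driven size):
```python
# The secondary-key function receives the cached value itself; LruCache wraps
# values in _Node objects internally and unwraps them before calling it, as
# the wrapper above shows.
event_cache = LruCache(
    max_size=10_000,
    cache_name="*getEvent*",
    cache_type=DualLookupCache,
    dual_lookup_secondary_key_function=lambda entry: (entry.event.room_id,),
)
```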


@@ -11,23 +11,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from unittest import mock
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersion
from synapse.event_auth import (
check_state_dependent_auth_rules,
check_state_independent_auth_rules,
)
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.federation.transport.client import StateRequestResponse
from synapse.logging.context import LoggingContext
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.state.v2 import _mainline_sort, _reverse_topological_power_sort
from synapse.types import JsonDict
from tests import unittest
from tests.test_utils import event_injection, make_awaitable
@@ -458,393 +449,3 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
main_store.get_event(pulled_event.event_id, allow_none=True)
)
self.assertIsNotNone(persisted, "pulled event was not persisted at all")
def test_process_pulled_event_with_rejected_missing_state(self) -> None:
"""Ensure that we correctly handle pulled events with missing state containing a
rejected state event
In this test, we pretend we are processing a "pulled" event (eg, via backfill
or get_missing_events). The pulled event has a prev_event we haven't previously
seen, so the server requests the state at that prev_event. We expect the server
to make a /state request.
We simulate a remote server whose /state includes a rejected kick event for a
local user. Notably, the kick event is rejected only because it cites a rejected
auth event and would otherwise be accepted based on the room state. During state
resolution, we re-run auth and can potentially introduce such rejected events
into the state if we are not careful.
We check that the pulled event is correctly persisted, and that the state
afterwards does not include the rejected kick.
"""
# The DAG we are testing looks like:
#
# ...
# |
# v
# remote admin user joins
# | |
# +-------+ +-------+
# | |
# | rejected power levels
# | from remote server
# | |
# | v
# | rejected kick of local user
# v from remote server
# new power levels |
# | v
# | missing event
# | from remote server
# | |
# +-------+ +-------+
# | |
# v v
# pulled event
# from remote server
#
# (arrows are in the opposite direction to prev_events.)
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
main_store = self.hs.get_datastores().main
# Create the room.
kermit_user_id = self.register_user("kermit", "test")
kermit_tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(
room_creator=kermit_user_id, tok=kermit_tok
)
room_version = self.get_success(main_store.get_room_version(room_id))
# Add another local user to the room. This user is going to be kicked in a
# rejected event.
bert_user_id = self.register_user("bert", "test")
bert_tok = self.login("bert", "test")
self.helper.join(room_id, user=bert_user_id, tok=bert_tok)
# Allow the remote user to kick bert.
# The remote user is going to send a rejected power levels event later on and we
# need state resolution to order it before another power levels event kermit is
# going to send later on. Hence we give both users the same power level, so that
# ties are broken by `origin_server_ts`.
self.helper.send_state(
room_id,
"m.room.power_levels",
{"users": {kermit_user_id: 100, OTHER_USER: 100}},
tok=kermit_tok,
)
# Add the remote user to the room.
other_member_event = self.get_success(
event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
)
initial_state_map = self.get_success(
main_store.get_partial_current_state_ids(room_id)
)
create_event = self.get_success(
main_store.get_event(initial_state_map[("m.room.create", "")])
)
bert_member_event = self.get_success(
main_store.get_event(initial_state_map[("m.room.member", bert_user_id)])
)
power_levels_event = self.get_success(
main_store.get_event(initial_state_map[("m.room.power_levels", "")])
)
# We now need a rejected state event that will fail
# `check_state_independent_auth_rules` but pass
# `check_state_dependent_auth_rules`.
# First, we create a power levels event that we pretend the remote server has
# accepted, but the local homeserver will reject.
next_depth = 100
next_timestamp = other_member_event.origin_server_ts + 100
rejected_power_levels_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"type": "m.room.power_levels",
"state_key": "",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [other_member_event.event_id],
"auth_events": [
initial_state_map[("m.room.create", "")],
initial_state_map[("m.room.power_levels", "")],
# The event will be rejected because of the duplicated auth
# event.
other_member_event.event_id,
other_member_event.event_id,
],
"origin_server_ts": next_timestamp,
"depth": next_depth,
"content": power_levels_event.content,
}
),
room_version,
)
next_depth += 1
next_timestamp += 100
with LoggingContext("send_rejected_power_levels_event"):
self.get_success(
self.hs.get_federation_event_handler()._process_pulled_event(
self.OTHER_SERVER_NAME,
rejected_power_levels_event,
backfilled=False,
)
)
self.assertEqual(
self.get_success(
main_store.get_rejection_reason(
rejected_power_levels_event.event_id
)
),
"auth_error",
)
# Then we create a kick event for a local user that cites the rejected power
# levels event in its auth events. The kick event will be rejected solely
# because of the rejected auth event and would otherwise be accepted.
rejected_kick_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"type": "m.room.member",
"state_key": bert_user_id,
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [rejected_power_levels_event.event_id],
"auth_events": [
initial_state_map[("m.room.create", "")],
rejected_power_levels_event.event_id,
initial_state_map[("m.room.member", bert_user_id)],
initial_state_map[("m.room.member", OTHER_USER)],
],
"origin_server_ts": next_timestamp,
"depth": next_depth,
"content": {"membership": "leave"},
}
),
room_version,
)
next_depth += 1
next_timestamp += 100
# The kick event must fail the state-independent auth rules, but pass the
# state-dependent auth rules, so that it has a chance of making it through state
# resolution.
self.get_failure(
check_state_independent_auth_rules(main_store, rejected_kick_event),
AuthError,
)
check_state_dependent_auth_rules(
rejected_kick_event,
[create_event, power_levels_event, other_member_event, bert_member_event],
)
# The kick event must also win over the original member event during state
# resolution.
self.assertEqual(
self.get_success(
_mainline_sort(
self.clock,
room_id,
event_ids=[
bert_member_event.event_id,
rejected_kick_event.event_id,
],
resolved_power_event_id=power_levels_event.event_id,
event_map={
bert_member_event.event_id: bert_member_event,
rejected_kick_event.event_id: rejected_kick_event,
},
state_res_store=main_store,
)
),
[bert_member_event.event_id, rejected_kick_event.event_id],
"The rejected kick event will not be applied after bert's join event "
"during state resolution. The test setup is incorrect.",
)
with LoggingContext("send_rejected_kick_event"):
self.get_success(
self.hs.get_federation_event_handler()._process_pulled_event(
self.OTHER_SERVER_NAME, rejected_kick_event, backfilled=False
)
)
self.assertEqual(
self.get_success(
main_store.get_rejection_reason(rejected_kick_event.event_id)
),
"auth_error",
)
# We need another power levels event which will win over the rejected one during
# state resolution, otherwise we hit other issues where we end up with a
# rejected power levels event during state resolution.
self.reactor.advance(100) # ensure the `origin_server_ts` is larger
new_power_levels_event = self.get_success(
main_store.get_event(
self.helper.send_state(
room_id,
"m.room.power_levels",
{"users": {kermit_user_id: 100, OTHER_USER: 100, bert_user_id: 1}},
tok=kermit_tok,
)["event_id"]
)
)
self.assertEqual(
self.get_success(
_reverse_topological_power_sort(
self.clock,
room_id,
event_ids=[
new_power_levels_event.event_id,
rejected_power_levels_event.event_id,
],
event_map={},
state_res_store=main_store,
full_conflicted_set=set(),
)
),
[rejected_power_levels_event.event_id, new_power_levels_event.event_id],
"The power levels events will not have the desired ordering during state "
"resolution. The test setup is incorrect.",
)
# Create a missing event, so that the local homeserver has to do a `/state` or
# `/state_ids` request to pull state from the remote homeserver.
missing_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"type": "m.room.message",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [rejected_kick_event.event_id],
"auth_events": [
initial_state_map[("m.room.create", "")],
initial_state_map[("m.room.power_levels", "")],
initial_state_map[("m.room.member", OTHER_USER)],
],
"origin_server_ts": next_timestamp,
"depth": next_depth,
"content": {"msgtype": "m.text", "body": "foo"},
}
),
room_version,
)
next_depth += 1
next_timestamp += 100
# The pulled event has two prev events, one of which is missing. We will make a
# `/state` or `/state_ids` request to the remote homeserver to ask it for the
# state before the missing prev event.
pulled_event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{
"type": "m.room.message",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [
new_power_levels_event.event_id,
missing_event.event_id,
],
"auth_events": [
initial_state_map[("m.room.create", "")],
new_power_levels_event.event_id,
initial_state_map[("m.room.member", OTHER_USER)],
],
"origin_server_ts": next_timestamp,
"depth": next_depth,
"content": {"msgtype": "m.text", "body": "bar"},
}
),
room_version,
)
next_depth += 1
next_timestamp += 100
# Prepare the response for the `/state` or `/state_ids` request.
# The remote server believes bert has been kicked, while the local server does
# not.
state_before_missing_event = self.get_success(
main_store.get_events_as_list(initial_state_map.values())
)
state_before_missing_event = [
event
for event in state_before_missing_event
if event.event_id != bert_member_event.event_id
]
state_before_missing_event.append(rejected_kick_event)
# We have to bump the clock a bit, to keep the retry logic in
# `FederationClient.get_pdu` happy
self.reactor.advance(60000)
with LoggingContext("send_pulled_event"):
async def get_event(
destination: str, event_id: str, timeout: Optional[int] = None
) -> JsonDict:
self.assertEqual(destination, self.OTHER_SERVER_NAME)
self.assertEqual(event_id, missing_event.event_id)
return {"pdus": [missing_event.get_pdu_json()]}
async def get_room_state_ids(
destination: str, room_id: str, event_id: str
) -> JsonDict:
self.assertEqual(destination, self.OTHER_SERVER_NAME)
self.assertEqual(event_id, missing_event.event_id)
return {
"pdu_ids": [event.event_id for event in state_before_missing_event],
"auth_chain_ids": [],
}
async def get_room_state(
room_version: RoomVersion, destination: str, room_id: str, event_id: str
) -> StateRequestResponse:
self.assertEqual(destination, self.OTHER_SERVER_NAME)
self.assertEqual(event_id, missing_event.event_id)
return StateRequestResponse(
state=state_before_missing_event,
auth_events=[],
)
self.mock_federation_transport_client.get_event.side_effect = get_event
self.mock_federation_transport_client.get_room_state_ids.side_effect = (
get_room_state_ids
)
self.mock_federation_transport_client.get_room_state.side_effect = (
get_room_state
)
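# With the transport mocked, processing pulled_event should follow the usual
# missing-prev-event path. A sketch of the expected sequence, assuming the
# handler's standard behaviour (the test does not assert on call order):
# 1. notice that missing_event is an unknown prev event of pulled_event,
# 2. fetch it with get_event (the federation /event/{eventId} endpoint),
# 3. fetch the state before it with get_room_state_ids / get_room_state.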
self.get_success(
self.hs.get_federation_event_handler()._process_pulled_event(
self.OTHER_SERVER_NAME, pulled_event, backfilled=False
)
)
self.assertIsNone(
self.get_success(
main_store.get_rejection_reason(pulled_event.event_id)
),
"Pulled event was unexpectedly rejected, likely due to a problem with "
"the test setup.",
)
self.assertEqual(
{pulled_event.event_id},
self.get_success(
main_store.have_events_in_timeline([pulled_event.event_id])
),
"Pulled event was not persisted, likely due to a problem with the test "
"setup.",
)
# We must not accept rejected events into the room state, so we expect bert
# to not be kicked, even if the remote server believes so.
new_state_map = self.get_success(
main_store.get_partial_current_state_ids(room_id)
)
self.assertEqual(
new_state_map[("m.room.member", bert_user_id)],
bert_member_event.event_id,
"Rejected kick event unexpectedly became part of room state.",
)
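# The invariant exercised here: a remote server is free to include an event
# the local server has rejected in its /state response, but the local server
# must keep its own rejection verdict and never adopt that event into room
# state.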


@@ -4140,90 +4140,3 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
{"b": 2},
channel.json_body["account_data"]["rooms"]["test_room"]["m.per_room"],
)
class UsersByExternalIdTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.get_success(
self.store.record_user_external_id(
"the-auth-provider", "the-external-id", self.other_user
)
)
self.get_success(
self.store.record_user_external_id(
"another-auth-provider", "a:complex@external/id", self.other_user
)
)
def test_no_auth(self) -> None:
"""Try to lookup a user without authentication."""
url = (
"/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id"
)
channel = self.make_request(
"GET",
url,
)
self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_binding_does_not_exist(self) -> None:
"""Tests that a lookup for an external ID that does not exist returns a 404"""
url = "/_synapse/admin/v1/auth_providers/the-auth-provider/users/unknown-id"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_success(self) -> None:
"""Tests a successful external ID lookup"""
url = (
"/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id"
)
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
{"user_id": self.other_user},
channel.json_body,
)
def test_success_urlencoded(self) -> None:
"""Tests a successful external ID lookup with an url-encoded ID"""
url = "/_synapse/admin/v1/auth_providers/another-auth-provider/users/a%3Acomplex%40external%2Fid"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
{"user_id": self.other_user},
channel.json_body,
)
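# The encoded external ID used above can be reproduced with the standard
# library (a usage sketch, not part of the test):
#     >>> from urllib.parse import quote
#     >>> quote("a:complex@external/id", safe="")
#     'a%3Acomplex%40external%2Fid'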


@@ -11,37 +11,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest as stdlib_unittest
+import unittest

-from pydantic import BaseModel, ValidationError
-from typing_extensions import Literal
+from pydantic import ValidationError

from synapse.rest.client.models import EmailRequestTokenBody

-class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase):
-    class Model(BaseModel):
-        medium: Literal["email", "msisdn"]
-
-    def test_accepts_valid_medium_string(self) -> None:
-        """Sanity check that Pydantic behaves sensibly with an enum-of-str
-
-        This is arguably more of a test of a class that inherits from str and Enum
-        simultaneously.
-        """
-        model = self.Model.parse_obj({"medium": "email"})
-        self.assertEqual(model.medium, "email")
-
-    def test_rejects_invalid_medium_value(self) -> None:
-        with self.assertRaises(ValidationError):
-            self.Model.parse_obj({"medium": "interpretive_dance"})
-
-    def test_rejects_invalid_medium_type(self) -> None:
-        with self.assertRaises(ValidationError):
-            self.Model.parse_obj({"medium": 123})
-
-class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase):
+class EmailRequestTokenBodyTestCase(unittest.TestCase):
base_request = {
"client_secret": "hunter2",
"email": "alice@wonderland.com",


@@ -115,6 +115,5 @@ class PurgeTests(HomeserverTestCase):
)
# The events aren't found.
self.store._invalidate_local_get_event_cache(create_event.event_id)
self.get_failure(self.store.get_event(create_event.event_id), NotFoundError)
self.get_failure(self.store.get_event(first["event_id"]), NotFoundError)
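# get_event normally serves events from an in-memory cache; the explicit
# _invalidate_local_get_event_cache call above forces these reads to hit the
# database, so the NotFoundError reflects the purge rather than a stale cache.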