
Compare commits


13 Commits

Author | SHA1 | Message | Date
Erik Johnston | 5de571987e | WIP: Type TreeCache | 2022-07-18 13:05:07 +01:00
Erik Johnston | 129691f190 | Comment TreeCache | 2022-07-18 10:49:12 +01:00
Erik Johnston | 462db2a171 | Comment LruCache | 2022-07-18 10:48:24 +01:00
Erik Johnston | 7f7b36d56d | Comment LruCache | 2022-07-18 10:40:36 +01:00
Erik Johnston | 057ae8b61c | Comments | 2022-07-17 11:34:34 +01:00
Erik Johnston | 7aceec3ed9 | Fix up | 2022-07-15 16:52:16 +01:00
Erik Johnston | cad555f07c | Better stuff | 2022-07-15 16:31:53 +01:00
Erik Johnston | 23c2f394a5 | Fix mypy | 2022-07-15 16:30:56 +01:00
Erik Johnston | 602a81f5a2 | don't update access | 2022-07-15 16:24:10 +01:00
Erik Johnston | f046366d2a | Fix test | 2022-07-15 15:54:01 +01:00
Erik Johnston | a22716c5c5 | Fix literal | 2022-07-15 15:31:57 +01:00
Erik Johnston | 40a8fba5f6 | Newsfile | 2022-07-15 15:27:03 +01:00
Erik Johnston | 326a175987 | Make DictionaryCache have better expiry properties | 2022-07-15 15:26:02 +01:00
232 changed files with 2878 additions and 5854 deletions

View File

@@ -69,7 +69,7 @@ with open('pyproject.toml', 'w') as f:
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
-pipx install poetry==1.1.14
+pipx install poetry==1.1.12
~/.local/bin/poetry lock
echo "::group::Patched pyproject.toml"

View File

@@ -1,16 +1,3 @@
# Commits in this file will be removed from GitHub blame results.
#
# To use this file locally, use:
# git blame --ignore-revs-file="path/to/.git-blame-ignore-revs" <files>
#
# or configure the `blame.ignoreRevsFile` option in your git config.
#
# If ignoring a pull request that was not squash merged, only the merge
# commit needs to be put here. Child commits will be resolved from it.
# Run black (#3679).
8b3d9b6b199abb87246f982d5db356f1966db925
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
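For local use, the two approaches described in the comments above amount to the following (a sketch; the paths assume you run from the repository root, and the blamed file is only an illustrative example):

```sh
# One-off: point git blame at the ignore file explicitly.
git blame --ignore-revs-file=".git-blame-ignore-revs" synapse/handlers/room.py

# Or configure it once for the repository, so plain `git blame` uses it.
git config blame.ignoreRevsFile .git-blame-ignore-revs
```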

View File

@@ -135,42 +135,11 @@ jobs:
/logs/**/*.log*
complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
# TODO: run complement (as with twisted trunk, see #12473).
strategy:
  fail-fast: false
  matrix:
    include:
      - arrangement: monolith
        database: SQLite
      - arrangement: monolith
        database: Postgres
      - arrangement: workers
        database: Postgres
steps:
  - name: Run actions/checkout@v2 for synapse
    uses: actions/checkout@v2
    with:
      path: synapse
  - name: Prepare Complement's Prerequisites
    run: synapse/.ci/scripts/setup_complement_prerequisites.sh
  - run: |
      set -o pipefail
      TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
    shell: bash
    name: Run Complement Tests
-# Open an issue if the build fails, so we know about it.
-# Only do this if we're not experimenting with this action in a PR.
+# open an issue if the build fails, so we know about it.
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
if: failure()
needs:
# TODO: should mypy be included here? It feels more brittle than the other two.
- mypy

View File

@@ -328,9 +328,6 @@ jobs:
- arrangement: monolith
  database: Postgres
- arrangement: workers
  database: Postgres
steps:
  - name: Run actions/checkout@v2 for synapse
    uses: actions/checkout@v2
@@ -346,6 +343,30 @@ jobs:
    shell: bash
    name: Run Complement Tests
# XXX When complement with workers is stable, move this back into the standard
# "complement" matrix above.
#
# See https://github.com/matrix-org/synapse/issues/13161
complement-workers:
  if: "${{ !failure() && !cancelled() }}"
  needs: linting-done
  runs-on: ubuntu-latest
  steps:
    - name: Run actions/checkout@v2 for synapse
      uses: actions/checkout@v2
      with:
        path: synapse
    - name: Prepare Complement's Prerequisites
      run: synapse/.ci/scripts/setup_complement_prerequisites.sh
    - run: |
        set -o pipefail
        POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
      shell: bash
      name: Run Complement Tests
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
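For reference, the `complement-workers` job above is just a pair of environment toggles around the same script; assuming the checkout layout the workflow uses (a `synapse` checkout with its Complement prerequisites prepared), a local reproduction might look like:

```sh
# Sketch: mirrors the workers/Postgres run from the job above.
set -o pipefail
POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement \
  synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
```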

View File

@@ -127,12 +127,12 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
-pipx install poetry==1.1.14
+pipx install poetry==1.1.12
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
-# NOT IN 1.1.14 poetry lock --check
+# NOT IN 1.1.12 poetry lock --check
working-directory: synapse
- run: |

View File

@@ -1,222 +1,7 @@
Synapse 1.65.0 (2022-08-16)
===========================
No significant changes since 1.65.0rc2.
Synapse 1.65.0rc2 (2022-08-11)
==============================
Internal Changes
----------------
- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501))
Synapse 1.65.0rc1 (2022-08-09)
==============================
Features
--------
- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273))
- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343))
- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370))
- Add a new module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428))
- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))
Bugfixes
--------
- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))
Updates to the Docker image
---------------------------
- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))
Improved Documentation
----------------------
- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
- Fix wrong headline for `url_preview_accept_language` in documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
- Update outdated information on `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))
Internal Changes
----------------
- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
- Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
- Adding missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
- Faster Room Joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
Synapse 1.64.0 (2022-08-02)
===========================
No significant changes since 1.64.0rc2.
Deprecation Warning
-------------------
Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
Synapse 1.64.0rc2 (2022-07-29)
==============================
This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse v1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
Synapse 1.64.0rc1 (2022-07-26)
==============================
This RC removed the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu.
Features
--------
- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038))
- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205))
- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208))
- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220))
- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276))
- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317))
Bugfixes
--------
- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263))
- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270))
- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278))
- Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296))
- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350))
- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352))
Improved Documentation
----------------------
- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231))
- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261))
- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271))
- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314))
- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333))
- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338))
- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344))
Deprecations and Removals
-------------------------
- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967))
- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192))
- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239))
- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326))
Internal Changes
----------------
- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942))
- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943))
- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094))
- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172))
- Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175))
- Refactor receipts servlet logic to avoid duplicated code. ([\#13198](https://github.com/matrix-org/synapse/issues/13198))
- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215))
- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218))
- Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224))
- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240))
- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308))
- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251))
- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257))
- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258))
- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260))
- Do not fail build if complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266))
- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274))
- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279))
- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307))
- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285))
- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292))
- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297))
- Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299))
- Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300))
- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303))
- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310))
- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311))
- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. ([\#13313](https://github.com/matrix-org/synapse/issues/13313))
- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318))
- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320))
- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323))
- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324))
- Add missing type hints to open tracing module. ([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362))
- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349))
- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342))
- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354))
Synapse 1.63.1 (2022-07-20)
===========================
Bugfixes
--------
- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332))
Synapse 1.63.0 (2022-07-19)
===========================
Improved Documentation
----------------------
- Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. ([\#13321](https://github.com/matrix-org/synapse/issues/13321))
Synapse vNext
=============
As of this release, Synapse no longer allows the tasks of verifying email address ownership, and password reset confirmation, to be delegated to an identity server. For more information, see the [upgrade notes](https://matrix-org.github.io/synapse/v1.64/upgrade.html#upgrading-to-v1640).
Synapse 1.63.0rc1 (2022-07-12)
==============================
@@ -226,7 +11,7 @@ Features
- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125))
- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031))
-- Improve validation logic in the account data REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))
+- Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))
Bugfixes
@@ -254,7 +39,7 @@ Improved Documentation
- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
-- Add documentation for homeserver usage statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
+- Add documentation for anonymised homeserver statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139))
- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
@@ -339,7 +124,7 @@ Bugfixes
- Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939))
- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
in Synapse v1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))

View File

@@ -34,14 +34,6 @@ additional-css = [
"docs/website_files/table-of-contents.css",
"docs/website_files/remove-nav-buttons.css",
"docs/website_files/indent-section-headers.css",
"docs/website_files/version-picker.css",
]
additional-js = [
"docs/website_files/table-of-contents.js",
"docs/website_files/version-picker.js",
"docs/website_files/version.js",
]
theme = "docs/website_files/theme"
[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"

changelog.d/12943.misc (new file)
View File

@@ -0,0 +1 @@
Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events.

View File

@@ -0,0 +1 @@
Drop tables used for groups/communities.

changelog.d/13094.misc (new file)
View File

@@ -0,0 +1 @@
Make the AS login method call `Auth.get_user_by_req` for checking the AS token.

changelog.d/13175.misc (new file)
View File

@@ -0,0 +1 @@
Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper.

View File

@@ -0,0 +1 @@
Drop support for delegating email verification to an external server.

changelog.d/13198.misc (new file)
View File

@@ -0,0 +1 @@
Refactor receipts servlet logic to avoid duplicated code.

View File

@@ -0,0 +1 @@
Add a `room_type` field in the responses for the list room and room details admin API. Contributed by @andrewdoh.

changelog.d/13215.misc (new file)
View File

@@ -0,0 +1 @@
Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table.

changelog.d/13218.misc (new file)
View File

@@ -0,0 +1 @@
Remove unused database table `event_reference_hashes`.

View File

@@ -0,0 +1 @@
Add support for room version 10.

changelog.d/13224.misc (new file)
View File

@@ -0,0 +1 @@
Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/13231.doc (new file)
View File

@@ -0,0 +1 @@
Provide an example of using the Admin API. Contributed by @jejo86.

changelog.d/13233.doc (new file)
View File

@@ -0,0 +1 @@
Move the documentation for how URL previews work to the URL preview module.

View File

@@ -0,0 +1 @@
Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu.

changelog.d/13240.misc (new file)
View File

@@ -0,0 +1 @@
Call the v2 identity service `/3pid/unbind` endpoint, rather than v1.

changelog.d/13242.misc (new file)
View File

@@ -0,0 +1 @@
Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/13251.misc (new file)
View File

@@ -0,0 +1 @@
Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/13253.misc (new file)
View File

@@ -0,0 +1 @@
Preparatory work for a per-room rate limiter on joins.

changelog.d/13254.misc (new file)
View File

@@ -0,0 +1 @@
Preparatory work for a per-room rate limiter on joins.

changelog.d/13255.misc (new file)
View File

@@ -0,0 +1 @@
Preparatory work for a per-room rate limiter on joins.

changelog.d/13257.misc (new file)
View File

@@ -0,0 +1 @@
Log the stack when waiting for an entire room to be un-partial stated.

changelog.d/13260.misc (new file)
View File

@@ -0,0 +1 @@
Clean-up tests for notifications.

changelog.d/13261.doc (new file)
View File

@@ -0,0 +1 @@
Move the documentation for how URL previews work to the URL preview module.

changelog.d/13263.bugfix (new file)
View File

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the "enable_email_notifs" and "email_notifs_for_new_users" options were enabled. Contributed by @thomasweston12.

changelog.d/13266.misc (new file)
View File

@@ -0,0 +1 @@
Do not fail build if complement with workers fails.

changelog.d/13267.misc (new file)
View File

@@ -0,0 +1 @@
Don't pull out state in `compute_event_context` for unconflicted state.

changelog.d/13270.bugfix (new file)
View File

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 1.40 where a user invited to a restricted room would be briefly unable to join.

changelog.d/13274.misc (new file)
View File

@@ -0,0 +1 @@
Don't pull out state in `compute_event_context` for unconflicted state.

changelog.d/13278.bugfix (new file)
View File

@@ -0,0 +1 @@
Fix long-standing bug where in rare instances Synapse could store the incorrect state for a room after a state resolution.

changelog.d/13279.misc (new file)
View File

@@ -0,0 +1 @@
Reduce the rebuild time for the complement-synapse docker image.

changelog.d/13284.misc (new file)
View File

@@ -0,0 +1 @@
Update locked version of `frozendict` to 2.3.2, which has a fix for a memory leak.

changelog.d/13292.misc (new file)
View File

@@ -0,0 +1 @@
Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently.

View File

@@ -1,145 +0,0 @@
# Creating multiple stream writers with a bash script
This script creates multiple [stream writer](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#stream-writers) workers.
Stream writers require both replication and HTTP listeners.
It also prints out example lines for the Synapse main configuration file.
Remember to route the necessary endpoints directly to the worker associated with them.
If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array.
```sh
#!/bin/bash
# Start with these replication and http ports.
# The script loop starts with the exact port and then increments it by one.
REP_START_PORT=8034
HTTP_START_PORT=8044
# Stream writer workers to generate. Feel free to add or remove them as you wish.
# Event persister ("events") isn't included here as it does not require its
# own HTTP listener.
STREAM_WRITERS+=( "presence" "typing" "receipts" "to_device" "account_data" )
NUM_WRITERS=$(expr ${#STREAM_WRITERS[@]})
i=0
while [ $i -lt "$NUM_WRITERS" ]
do
cat << EOF > ${STREAM_WRITERS[$i]}_stream_writer.yaml
worker_app: synapse.app.generic_worker
worker_name: ${STREAM_WRITERS[$i]}_stream_writer
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
  - type: http
    port: $(expr $REP_START_PORT + $i)
    resources:
      - names: [replication]
  - type: http
    port: $(expr $HTTP_START_PORT + $i)
    resources:
      - names: [client]
worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml
EOF
HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer:
host: 127.0.0.1
port: $(expr $REP_START_PORT + $i)
"
HOMESERVER_YAML_STREAM_WRITERS+=$" ${STREAM_WRITERS[$i]}: ${STREAM_WRITERS[$i]}_stream_writer
"
((i++))
done
cat << EXAMPLECONFIG
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information.
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
$HOMESERVER_YAML_INSTANCE_MAP
stream_writers:
$HOMESERVER_YAML_STREAM_WRITERS
EXAMPLECONFIG
```
Copy the code above and save it to a file called ```create_stream_writers.sh``` (for example).
Make the script executable by running ```chmod +x create_stream_writers.sh```.
## Run the script to create workers and print out a sample configuration
Simply run the script to create YAML files in the current folder and print out the required configuration for ```homeserver.yaml```.
```console
$ ./create_stream_writers.sh
# Add these lines to your homeserver.yaml.
# Don't forget to configure your reverse proxy and
# necessary endpoints to their respective worker.
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md
# for more information
# Remember: Under NO circumstances should the replication
# listener be exposed to the public internet;
# it has no authentication and is unencrypted.
instance_map:
  presence_stream_writer:
    host: 127.0.0.1
    port: 8034
  typing_stream_writer:
    host: 127.0.0.1
    port: 8035
  receipts_stream_writer:
    host: 127.0.0.1
    port: 8036
  to_device_stream_writer:
    host: 127.0.0.1
    port: 8037
  account_data_stream_writer:
    host: 127.0.0.1
    port: 8038
stream_writers:
  presence: presence_stream_writer
  typing: typing_stream_writer
  receipts: receipts_stream_writer
  to_device: to_device_stream_writer
  account_data: account_data_stream_writer
```
Simply copy-and-paste the output to an appropriate place in your Synapse main configuration file.
## Write directly to Synapse configuration file
You could also write the output directly to the homeserver's main configuration file. **This, however, is not recommended** as even a small typo (such as replacing >> with >) can erase the entire ```homeserver.yaml```.
If you do this, back up your original configuration file first:
```console
# Back up homeserver.yaml first
cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak
# Create workers and write output to your homeserver.yaml
./create_stream_writers.sh >> /etc/matrix-synapse/homeserver.yaml
```

View File

@@ -1,4 +1,4 @@
# Creating multiple generic workers with a bash script
# Creating multiple workers with a bash script
Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:

debian/changelog (vendored)
View File

@@ -1,53 +1,3 @@
matrix-synapse-py3 (1.65.0) stable; urgency=medium
* New Synapse release 1.65.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Aug 2022 16:51:26 +0100
matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium
* New Synapse release 1.65.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 11 Aug 2022 11:38:18 +0100
matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium
* New Synapse release 1.65.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 09 Aug 2022 11:39:29 +0100
matrix-synapse-py3 (1.64.0) stable; urgency=medium
* New Synapse release 1.64.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 02 Aug 2022 10:32:30 +0100
matrix-synapse-py3 (1.64.0~rc2) stable; urgency=medium
* New Synapse release 1.64.0rc2.
-- Synapse Packaging team <packages@matrix.org> Fri, 29 Jul 2022 12:22:53 +0100
matrix-synapse-py3 (1.64.0~rc1) stable; urgency=medium
* New Synapse release 1.64.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Jul 2022 12:11:49 +0100
matrix-synapse-py3 (1.63.1) stable; urgency=medium
* New Synapse release 1.63.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Jul 2022 13:36:52 +0100
matrix-synapse-py3 (1.63.0) stable; urgency=medium
* Clarify that homeserver server names are included in the data reported
by opt-in server stats reporting (`report_stats` homeserver config option).
* New Synapse release 1.63.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Jul 2022 14:42:24 +0200
matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium
* New Synapse release 1.63.0rc1.

View File

@@ -31,7 +31,7 @@ EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
-# Whether to report homeserver usage statistics.
+# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
fi

View File

@@ -37,7 +37,7 @@ msgstr ""
#. Type: boolean
#. Description
#: ../templates:2001
msgid "Report homeserver usage statistics?"
msgid "Report anonymous statistics?"
msgstr ""
#. Type: boolean
@@ -45,11 +45,11 @@ msgstr ""
#: ../templates:2001
msgid ""
"Developers of Matrix and Synapse really appreciate helping the project out "
"by reporting homeserver usage statistics from this homeserver. Your "
"homeserver's server name, along with very basic aggregate data (e.g. "
"number of users) will be reported. But it helps track the growth of the "
"Matrix community, and helps in making Matrix a success, as well as to "
"convince other networks that they should peer with Matrix."
"by reporting anonymized usage statistics from this homeserver. Only very "
"basic aggregate data (e.g. number of users) will be reported, but it helps "
"track the growth of the Matrix community, and helps in making Matrix a "
"success, as well as to convince other networks that they should peer with "
"Matrix."
msgstr ""
#. Type: boolean

debian/templates (vendored)
View File

@@ -10,13 +10,12 @@ _Description: Name of the server:
Template: matrix-synapse/report-stats
Type: boolean
Default: false
-_Description: Report homeserver usage statistics?
+_Description: Report anonymous statistics?
Developers of Matrix and Synapse really appreciate helping the
- project out by reporting homeserver usage statistics from this
- homeserver. Your homeserver's server name, along with very basic
- aggregate data (e.g. number of users) will be reported. But it
- helps track the growth of the Matrix community, and helps in
- making Matrix a success, as well as to convince other networks
- that they should peer with Matrix.
+ project out by reporting anonymized usage statistics from this
+ homeserver. Only very basic aggregate data (e.g. number of users)
+ will be reported, but it helps track the growth of the Matrix
+ community, and helps in making Matrix a success, as well as to
+ convince other networks that they should peer with Matrix.
.
Thank you.

View File

@@ -40,13 +40,12 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
build-essential cargo git libffi-dev libssl-dev \
apt-get update -qq && apt-get install -yqq git \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
-# We use a specific commit from poetry's master branch instead of our usual 1.1.14,
+# We use a specific commit from poetry's master branch instead of our usual 1.1.12,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
@@ -69,18 +68,7 @@ COPY pyproject.toml poetry.lock /synapse/
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION
# If specified, we won't use the Poetry lockfile.
# Instead, we'll just install what a regular `pip install` would from PyPI.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Export the dependencies, but only if we're actually going to use the Poetry lockfile.
# Otherwise, just create an empty requirements file so that the Dockerfile can
# proceed.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \
else \
touch /synapse/requirements.txt; \
fi
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}
###
### Stage 1: builder
@@ -120,17 +108,8 @@ COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
fi
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
###
### Stage 2: runtime
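Both `TEST_ONLY_*` settings above are ordinary Docker build arguments, so a caller opts in via `--build-arg`; a hedged sketch (the image tag is illustrative):

```sh
# Build against PyPI instead of the Poetry lockfile, per the ARG above.
DOCKER_BUILDKIT=1 docker build \
    --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 \
    -t synapse-test -f docker/Dockerfile .
```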

View File

@@ -1,62 +1,38 @@
# syntax=docker/dockerfile:1
# Inherit from the official Synapse docker image
ARG SYNAPSE_VERSION=latest
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
FROM debian:bullseye-slim AS deps_base
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
redis-server nginx-light
# Similarly, a base to copy the redis server from.
#
# The redis docker image has fewer dynamic libraries than the debian package,
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc).
FROM redis:6-bullseye AS redis_base
# now build the final image, based on the regular Synapse docker image
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
RUN mkdir -p /etc/supervisor/conf.d
# Install deps
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
redis-server nginx-light
# Copy over redis and nginx
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/log/nginx /var/lib/nginx
# Disable the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
# Copy a script to prefix log lines with the supervisor program name
COPY ./docker/prefix-log /usr/local/bin/
# Expose nginx listener port
EXPOSE 8080/tcp
# A script to read environment variables and create the necessary
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD /bin/sh /healthcheck.sh
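Because this Dockerfile uses `--mount=type=cache` and multi-stage copies, it needs BuildKit; a minimal build sketch, assuming the file lives at `docker/Dockerfile-workers` as in the Synapse repository and the base image tag is supplied via the `SYNAPSE_VERSION` argument declared above:

```sh
DOCKER_BUILDKIT=1 docker build \
    --build-arg SYNAPSE_VERSION=latest \
    -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
```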

View File

@@ -22,10 +22,6 @@ Consult the [contributing guide][guideComplementSh] for instructions on how to u
Under some circumstances, you may wish to build the images manually.
The instructions below will lead you to doing that.
Note that these images can only be built using [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/),
therefore BuildKit needs to be enabled when calling `docker build`. This can be done by
setting `DOCKER_BUILDKIT=1` in your environment.
Start by building the base Synapse docker image. If you wish to run tests with the latest
release of Synapse, instead of your current checkout, you can skip this step. From the
root of the repository:
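A sketch of that first step, assuming the standard Dockerfile location in the repository (the tag mirrors the official image name, but any tag works):

```sh
# BuildKit must be enabled for these Dockerfiles.
DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```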

View File

@@ -1,4 +1,3 @@
# syntax=docker/dockerfile:1
# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse
# by including a built-in postgres instance, as well as setting up the homeserver so
# that it is ready for testing via Complement.

View File

rc_joins:
  per_second: 9999
  burst_count: 9999
-rc_joins_per_room:
-  per_second: 9999
-  burst_count: 9999
rc_3pid_validation:
  per_second: 1000
  burst_count: 1000

View File

@@ -19,7 +19,7 @@ username=www-data
autorestart=true
[program:redis]
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0

View File

@@ -68,7 +68,7 @@
- [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
-- [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
+- [Reporting Anonymised Statistics](usage/administration/monitoring/reporting_anonymised_statistics.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)

View File

@@ -237,28 +237,3 @@ poetry run pip install build && poetry run python -m build
because [`build`](https://github.com/pypa/build) is a standardish tool which
doesn't require poetry. (It's what we use in CI too). However, you could try
`poetry build` too.
# Troubleshooting
## Check the version of poetry with `poetry --version`.
At the time of writing, the 1.2 series is beta only. We have seen some examples
where the lockfiles generated by 1.2 prereleases aren't interpreted correctly
by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical
[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain
[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775).
It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`.
## Clear caches: `poetry cache clear --all pypi`.
Poetry caches a bunch of information about packages that isn't readily available
from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.
## Try `--verbose` or `--dry-run` arguments.
Sometimes useful to see what poetry's internal logic is.
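Taken together, the commands this troubleshooting section refers to form a quick diagnostic sequence (a sketch; run from the Synapse checkout):

```sh
poetry --version                            # expect a 1.1.x release, e.g. 1.1.14
pipx runpip poetry list | grep poetry-core  # check which poetry-core is in use
poetry cache list                           # enumerate poetry's caches
poetry cache clear --all pypi               # clear the pypi cache
poetry install -v --dry-run                 # see what poetry would do, verbosely
```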

View File

@@ -263,7 +263,7 @@ class MyAuthProvider:
return None
if self.credentials.get(username) == login_dict.get("my_field"):
-return (self.api.get_qualified_user_id(username), None)
+return self.api.get_qualified_user_id(username)
async def check_pass(
self,
@@ -280,5 +280,5 @@ class MyAuthProvider:
return None
if self.credentials.get(username) == login_dict.get("password"):
-return (self.api.get_qualified_user_id(username), None)
+return self.api.get_qualified_user_id(username)
```

View File

@@ -79,32 +79,63 @@ server {
}
```
### Caddy v1
```
matrix.example.com {
proxy /_matrix http://localhost:8008 {
transparent
}
proxy /_synapse/client http://localhost:8008 {
transparent
}
}
example.com:8448 {
proxy / http://localhost:8008 {
transparent
}
}
```
### Caddy v2
```
matrix.example.com {
-reverse_proxy /_matrix/* localhost:8008
-reverse_proxy /_synapse/client/* localhost:8008
+reverse_proxy /_matrix/* http://localhost:8008
+reverse_proxy /_synapse/client/* http://localhost:8008
}
example.com:8448 {
-reverse_proxy localhost:8008
+reverse_proxy http://localhost:8008
}
```
[Delegation](delegate.md) example:
```
(matrix-well-known-header) {
# Headers
header Access-Control-Allow-Origin "*"
header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
header Content-Type "application/json"
}
example.com {
header /.well-known/matrix/* Content-Type application/json
header /.well-known/matrix/* Access-Control-Allow-Origin *
respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
respond /.well-known/matrix/client `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}`
handle /.well-known/matrix/server {
import matrix-well-known-header
respond `{"m.server":"matrix.example.com:443"}`
}
handle /.well-known/matrix/client {
import matrix-well-known-header
respond `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}`
}
}
matrix.example.com {
-reverse_proxy /_matrix/* localhost:8008
-reverse_proxy /_synapse/client/* localhost:8008
+reverse_proxy /_matrix/* http://localhost:8008
+reverse_proxy /_synapse/client/* http://localhost:8008
}
```
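One way to sanity-check a finished Caddy setup like the examples above (hostnames are the same placeholders used in the snippets; all three requests should succeed):

```sh
# Delegation files served by example.com:
curl https://example.com/.well-known/matrix/server
curl https://example.com/.well-known/matrix/client

# The homeserver answering through the reverse proxy:
curl https://matrix.example.com/_matrix/client/versions
```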

View File

@@ -22,7 +22,7 @@ choose their own username.
In the first case - where users are automatically allocated a Matrix ID - it is
the responsibility of the mapping provider to normalise the SSO attributes and
map them to a valid Matrix ID. The [specification for Matrix
-IDs](https://spec.matrix.org/latest/appendices/#user-identifiers) has some
+IDs](https://matrix.org/docs/spec/appendices#user-identifiers) has some
information about what is considered valid.
If the mapping provider does not assign a Matrix ID, then Synapse will
@@ -37,10 +37,9 @@ as Synapse). The Synapse config is then modified to point to the mapping provide
## OpenID Mapping Providers
The OpenID mapping provider can be customized by editing the
-[`oidc_providers.user_mapping_provider.module`](usage/configuration/config_documentation.md#oidc_providers)
-config option.
+`oidc_config.user_mapping_provider.module` config option.
-`oidc_providers.user_mapping_provider.config` allows you to provide custom
+`oidc_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
@@ -59,7 +58,7 @@ A custom mapping provider must specify the following methods:
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - A `dict` representing the parsed content of the
-`oidc_providers.user_mapping_provider.config` homeserver config option.
+`oidc_config.user_mapping_provider.config` homeserver config option.
Runs on homeserver startup. Providers should extract and validate
any option values they need here.
- Whatever is returned will be passed back to the user mapping provider module's
@@ -103,7 +102,7 @@ A custom mapping provider must specify the following methods:
will be returned as part of the response during a successful login.
Note that care should be taken to not overwrite any of the parameters
-usually returned as part of the [login response](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login).
+usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
### Default OpenID Mapping Provider
@@ -114,8 +113,7 @@ specified in the config. It is located at
## SAML Mapping Providers
The SAML mapping provider can be customized by editing the
[`saml2_config.user_mapping_provider.module`](docs/usage/configuration/config_documentation.md#saml2_config)
config option.
`saml2_config.user_mapping_provider.module` config option.
`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for


@@ -91,34 +91,18 @@ process, for example:
# Upgrading to v1.64.0
## Deprecation of the ability to delegate e-mail verification to identity servers
## Delegation of email validation no longer supported
Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
As of this version, Synapse no longer allows the tasks of verifying email address
ownership, and password reset confirmation, to be delegated to an identity server.
If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
To continue to allow users to add email addresses to their homeserver accounts,
and perform password resets, make sure that Synapse is configured with a
working email server in the `email` configuration section (including, at a
minimum, a `notif_from` setting.)
The option that will be removed is `account_threepid_delegates.email`.
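A minimal sketch of such a configuration, with placeholder host and credentials, might look like:

```yaml
email:
  smtp_host: mail.example.com        # placeholder SMTP server
  smtp_port: 587
  smtp_user: synapse                 # placeholder credentials
  smtp_pass: secret
  require_transport_security: true
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
```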
## Changes to the event replication streams
Synapse now includes a flag indicating if an event is an outlier when
replicating it to other workers. This is a forwards- and backwards-incompatible
change: v1.63 workers cannot process events replicated by v1.64 workers, and
vice versa.
Once all workers are upgraded to v1.64 (or downgraded to v1.63), event
replication will resume as normal.
## frozendict release
[frozendict 2.3.3](https://github.com/Marco-Sulla/python-frozendict/releases/tag/v2.3.3)
has recently been released, which fixes a memory leak that occurs during `/sync`
requests. We advise server administrators who installed Synapse via pip to upgrade
frozendict with `pip install --upgrade frozendict`. The Docker image
`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` already
include the updated library.
Specifying an `email` setting under `account_threepid_delegates` will now cause
an error at startup.
# Upgrading to v1.62.0


@@ -5,9 +5,8 @@
Many of the API calls in the admin api will require an `access_token` for a
server admin. (Note that a server admin is distinct from a room admin.)
An existing user can be marked as a server admin by updating the database directly.
A user can be marked as a server admin by updating the database directly, e.g.:
Check your [database settings](config_documentation.md#database) in the configuration file, connect to the correct database using either `psql [database name]` (if using PostgreSQL) or `sqlite3 path/to/your/database.db` (if using SQLite) and elevate the user `@foo:bar.com` to administrator.
```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
```
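For example, on a PostgreSQL-backed homeserver this can be done in one step (the database name and file path are whatever your configuration uses):

```sh
psql synapse -c "UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';"
# or, for SQLite:
sqlite3 /path/to/homeserver.db "UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';"
```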


@@ -2,11 +2,11 @@
This API allows you to manage tokens which can be used to authenticate
registration requests, as proposed in
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
and stabilised in version 1.2 of the Matrix specification.
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md).
To use it, you will need to enable the `registration_requires_token` config
option, and authenticate by providing an `access_token` for a server admin:
see [Admin API](../admin_api).
see [Admin API](../../usage/administration/admin_api).
Note that this API is still experimental; not all clients may support it yet.
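For instance, a new token limited to five uses could be created with a request along these lines (the server name and access token are placeholders):

```sh
curl -X POST \
    --header "Authorization: Bearer <admin_access_token>" \
    --header "Content-Type: application/json" \
    --data '{"uses_allowed": 5}' \
    https://matrix.example.com/_synapse/admin/v1/registration_tokens/new
```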
## Registration token objects


@@ -1,11 +1,11 @@
# Reporting Homeserver Usage Statistics
# Reporting Anonymised Statistics
When generating your Synapse configuration file, you are asked whether you
would like to report usage statistics to Matrix.org. These statistics
would like to report anonymised statistics to Matrix.org. These statistics
provide a glimpse into the number of Synapse homeservers
participating in the network, as well as statistics such as the number of
rooms being created and messages being sent. This feature is sometimes
affectionately called "phone home" stats. Reporting
affectionately called "phone-home" stats. Reporting
[is optional](../../configuration/config_documentation.md#report_stats)
and the reporting endpoint
[can be configured](../../configuration/config_documentation.md#report_stats_endpoint),
@@ -21,9 +21,9 @@ The following statistics are sent to the configured reporting endpoint:
| Statistic Name | Type | Description |
|----------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `homeserver` | string | The homeserver's server name. |
| `memory_rss` | int | The memory usage of the process (in kilobytes on Unix-based systems, bytes on macOS). |
| `cpu_average` | int | CPU time in % of a single core (not % of all cores). |
| `homeserver` | string | The homeserver's server name. |
| `server_context` | string | An arbitrary string used to group statistics from a set of homeservers. |
| `timestamp` | int | The current time, represented as the number of seconds since the epoch. |
| `uptime_seconds` | int | The number of seconds since the homeserver was last started. |
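Assembled from the fields above, a reported payload might look roughly like this (values are purely illustrative):

```json
{
  "homeserver": "example.com",
  "server_context": "example-fleet",
  "timestamp": 1658102400,
  "uptime_seconds": 86400,
  "memory_rss": 524288,
  "cpu_average": 25
}
```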

File diff suppressed because it is too large


@@ -24,11 +24,6 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting the
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.
In addition to these modifications, we have added a version picker to the documentation.
Users can switch between the documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.
More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and


@@ -131,18 +131,6 @@
<i class="fa fa-search"></i>
</button>
{{/if}}
<div class="version-picker">
<div class="dropdown">
<div class="select">
<span></span>
<i class="fa fa-chevron-down"></i>
</div>
<input type="hidden" name="version">
<ul class="dropdown-menu">
<!-- Versions will be added dynamically in version-picker.js -->
</ul>
</div>
</div>
</div>
<h1 class="menu-title">{{ book_title }}</h1>
@@ -321,4 +309,4 @@
{{/if}}
</body>
</html>
</html>


@@ -1,78 +0,0 @@
.version-picker {
display: flex;
align-items: center;
}
.version-picker .dropdown {
width: 130px;
max-height: 29px;
margin-left: 10px;
display: inline-block;
border-radius: 4px;
border: 1px solid var(--theme-popup-border);
position: relative;
font-size: 13px;
color: var(--fg);
height: 100%;
text-align: left;
}
.version-picker .dropdown .select {
cursor: pointer;
display: block;
padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
font-size: 10px;
color: var(--fg);
cursor: pointer;
float: right;
line-height: 20px !important;
}
.version-picker .dropdown:hover {
border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
border: 1px solid var(--theme-popup-border);
border-radius: 2px 2px 0 0;
background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
position: absolute;
background-color: var(--theme-popup-bg);
width: 100%;
left: -1px;
right: 1px;
margin-top: 1px;
border: 1px solid var(--theme-popup-border);
border-radius: 0 0 4px 4px;
overflow: hidden;
display: none;
max-height: 300px;
overflow-y: auto;
z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
font-size: 12px;
padding: 6px 20px;
cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
padding: 0;
list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
display: inline-block;
content: "✓";
margin-inline-start: -14px;
width: 14px;
}


@@ -1,127 +0,0 @@
const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');
fetchVersions(dropdown, dropdownMenu).then(() => {
initializeVersionDropdown(dropdown, dropdownMenu);
});
/**
* Initialize the dropdown functionality for version selection.
*
* @param {Element} dropdown - The dropdown element.
* @param {Element} dropdownMenu - The dropdown menu element.
*/
function initializeVersionDropdown(dropdown, dropdownMenu) {
// Toggle the dropdown menu on click
dropdown.addEventListener('click', function () {
this.setAttribute('tabindex', 1);
this.classList.toggle('active');
dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
});
// Remove the 'active' class and hide the dropdown menu on focusout
dropdown.addEventListener('focusout', function () {
this.classList.remove('active');
dropdownMenu.style.display = 'none';
});
// Handle item selection within the dropdown menu
const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
dropdownMenuItems.forEach(function (item) {
item.addEventListener('click', function () {
dropdownMenuItems.forEach(function (item) {
item.classList.remove('active');
});
this.classList.add('active');
dropdown.querySelector('span').textContent = this.textContent;
dropdown.querySelector('input').value = this.getAttribute('id');
window.location.href = changeVersion(window.location.href, this.textContent);
});
});
};
/**
* This function fetches the available versions from a GitHub repository
* and inserts them into the version picker.
*
* @param {Element} dropdown - The dropdown element.
* @param {Element} dropdownMenu - The dropdown menu element.
* @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
*/
function fetchVersions(dropdown, dropdownMenu) {
return new Promise((resolve, reject) => {
window.addEventListener("load", () => {
fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
cache: "force-cache",
}).then(res =>
res.json()
).then(resObject => {
const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
const versions = tree.map(item => item.path).sort(sortVersions);
// Create a list of <li> items for versions
versions.forEach((version) => {
const li = document.createElement("li");
li.textContent = version;
li.id = version;
if (window.SYNAPSE_VERSION === version) {
li.classList.add('active');
dropdown.querySelector('span').textContent = version;
dropdown.querySelector('input').value = version;
}
dropdownMenu.appendChild(li);
});
resolve(versions);
}).catch(ex => {
console.error("Failed to fetch version data", ex);
reject(ex);
})
});
});
}
/**
* Custom sorting function to sort an array of version strings.
*
* @param {string} a - The first version string to compare.
* @param {string} b - The second version string to compare.
* @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
*/
function sortVersions(a, b) {
// Put 'develop' and 'latest' at the top
if (a === 'develop' || a === 'latest') return -1;
if (b === 'develop' || b === 'latest') return 1;
const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];
return versionB.localeCompare(versionA);
}
/**
* Change the version in a URL path.
*
* @param {string} url - The original URL to be modified.
* @param {string} newVersion - The new version to replace the existing version in the URL.
* @returns {string} The updated URL with the new version.
*/
function changeVersion(url, newVersion) {
const parsedURL = new URL(url);
const pathSegments = parsedURL.pathname.split('/');
// Modify the version
pathSegments[2] = newVersion;
// Reconstruct the URL
parsedURL.pathname = pathSegments.join('/');
return parsedURL.href;
}
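As a concrete example of `changeVersion` (assuming the published docs live under a `/synapse/<version>/` path, so the version is the third path segment):

```js
// Yields "https://matrix-org.github.io/synapse/latest/setup.html"
changeVersion("https://matrix-org.github.io/synapse/v1.64/setup.html", "latest");
```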


@@ -1 +0,0 @@
window.SYNAPSE_VERSION = 'v1.65';


@@ -84,6 +84,9 @@ disallow_untyped_defs = False
[mypy-synapse.http.matrixfederationclient]
disallow_untyped_defs = False
[mypy-synapse.logging.opentracing]
disallow_untyped_defs = False
[mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.

poetry.lock generated

@@ -177,7 +177,7 @@ optional = false
python-versions = "*"
[package.extras]
test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"]
test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"]
[[package]]
name = "constantly"
@@ -290,7 +290,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
[[package]]
name = "frozendict"
version = "2.3.3"
version = "2.3.2"
description = "A simple immutable dictionary"
category = "main"
optional = false
@@ -435,8 +435,8 @@ optional = false
python-versions = ">=3.6"
[package.extras]
trio = ["async-generator", "trio"]
test = ["async-timeout", "trio", "testpath", "pytest-asyncio", "pytest-trio", "pytest"]
test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"]
trio = ["trio", "async-generator"]
[[package]]
name = "jinja2"
@@ -535,12 +535,12 @@ attrs = "*"
importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}
[package.extras]
test = ["aiounittest", "twisted", "tox"]
dev = ["twine (==4.0.1)", "build (==0.8.0)", "isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "mypy (==0.910)", "aiounittest", "twisted", "tox"]
dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"]
test = ["tox", "twisted", "aiounittest"]
[[package]]
name = "matrix-synapse-ldap3"
version = "0.2.2"
version = "0.2.1"
description = "An LDAP3 auth provider for Synapse"
category = "main"
optional = true
@@ -552,7 +552,7 @@ service-identity = "*"
Twisted = ">=15.1.0"
[package.extras]
dev = ["isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "types-setuptools", "mypy (==0.910)", "ldaptor", "tox", "matrix-synapse"]
dev = ["matrix-synapse", "tox", "ldaptor", "mypy (==0.910)", "types-setuptools", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]
[[package]]
name = "mccabe"
@@ -820,10 +820,10 @@ optional = false
python-versions = ">=3.6"
[package.extras]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"]
dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"]
crypto = ["cryptography (>=3.3.1)"]
dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"]
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"]
[[package]]
name = "pymacaroons"
@@ -1563,7 +1563,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
content-hash = "c24bbcee7e86dbbe7cdbf49f91a25b310bf21095452641e7440129f59b077f78"
content-hash = "e96625923122e29b6ea5964379828e321b6cede2b020fc32c6f86c09d86d1ae8"
[metadata.files]
attrs = [
@@ -1753,23 +1753,23 @@ flake8-comprehensions = [
{file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
]
frozendict = [
{file = "frozendict-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39942914c1217a5a49c7551495a103b3dbd216e19413687e003b859c6b0ebc12"},
{file = "frozendict-2.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5589256058b31f2b91419fa30b8dc62dbdefe7710e688a3fd5b43849161eecc9"},
{file = "frozendict-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:35eb7e59e287c41f4f712d4d3d2333354175b155d217b97c99c201d2d8920790"},
{file = "frozendict-2.3.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:310aaf81793abf4f471895e6fe65e0e74a28a2aaf7b25c2ba6ccd4e35af06842"},
{file = "frozendict-2.3.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c353c11010a986566a0cb37f9a783c560ffff7d67d5e7fd52221fb03757cdc43"},
{file = "frozendict-2.3.3-cp36-cp36m-win_amd64.whl", hash = "sha256:15b5f82aad108125336593cec1b6420c638bf45f449c57e50949fc7654ea5a41"},
{file = "frozendict-2.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4737e5257756bd6b877504ff50185b705db577b5330d53040a6cf6417bb3cdb"},
{file = "frozendict-2.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a14c11e33e8b0bc09e07bba3732c77a502c39edb8c3959fd9a0e490e031158"},
{file = "frozendict-2.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:027952d1698ac9c766ef43711226b178cdd49d2acbdff396936639ad1d2a5615"},
{file = "frozendict-2.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ef818d66c85098a37cf42509545a4ba7dd0c4c679d6262123a8dc14cc474bab7"},
{file = "frozendict-2.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812279f2b270c980112dc4e367b168054f937108f8044eced4199e0ab2945a37"},
{file = "frozendict-2.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c1fb7efbfebc2075f781be3d9774e4ba6ce4fc399148b02097f68d4b3c4bc00a"},
{file = "frozendict-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0b46d4bf95bce843c0151959d54c3e5b8d0ce29cb44794e820b3ec980d63eee"},
{file = "frozendict-2.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38c4660f37fcc70a32ff997fe58e40b3fcc60b2017b286e33828efaa16b01308"},
{file = "frozendict-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:919e3609844fece11ab18bcbf28a3ed20f8108ad4149d7927d413687f281c6c9"},
{file = "frozendict-2.3.3-py3-none-any.whl", hash = "sha256:f988b482d08972a196664718167a993a61c9e9f6fe7b0ca2443570b5f20ca44a"},
{file = "frozendict-2.3.3.tar.gz", hash = "sha256:398539c52af3c647d103185bbaa1291679f0507ad035fe3bab2a8b0366d52cf1"},
{file = "frozendict-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4fb171d1e84d17335365877e19d17440373b47ca74a73c06f65ac0b16d01e87f"},
{file = "frozendict-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a3640e9d7533d164160b758351aa49d9e85bbe0bd76d219d4021e90ffa6a52"},
{file = "frozendict-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:87cfd00fafbc147d8cd2590d1109b7db8ac8d7d5bdaa708ba46caee132b55d4d"},
{file = "frozendict-2.3.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fb09761e093cfabb2f179dbfdb2521e1ec5701df714d1eb5c51fa7849027be19"},
{file = "frozendict-2.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82176dc7adf01cf8f0193e909401939415a230a1853f4a672ec1629a06ceae18"},
{file = "frozendict-2.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:c1c70826aa4a50fa283fe161834ac4a3ac7c753902c980bb8b595b0998a38ddb"},
{file = "frozendict-2.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1db5035ddbed995badd1a62c4102b5e207b5aeb24472df2c60aba79639d7996b"},
{file = "frozendict-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4246fc4cb1413645ba4d3513939b90d979a5bae724be605a10b2b26ee12f839c"},
{file = "frozendict-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:680cd42fb0a255da1ce45678ccbd7f69da750d5243809524ebe8f45b2eda6e6b"},
{file = "frozendict-2.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a7f3a181d6722c92a9fab12d0c5c2b006a18ca5666098531f316d1e1c8984e3"},
{file = "frozendict-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1cb866eabb3c1384a7fe88e1e1033e2b6623073589012ab637c552bf03f6364"},
{file = "frozendict-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:952c5e5e664578c5c2ce8489ee0ab6a1855da02b58ef593ee728fc10d672641a"},
{file = "frozendict-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:608b77904cd0117cd816df605a80d0043a5326ee62529327d2136c792165a823"},
{file = "frozendict-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0eed41fd326f0bcc779837d8d9e1374da1bc9857fe3b9f2910195bbd5fff3aeb"},
{file = "frozendict-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:bde28db6b5868dd3c45b3555f9d1dc5a1cca6d93591502fa5dcecce0dde6a335"},
{file = "frozendict-2.3.2-py3-none-any.whl", hash = "sha256:6882a9bbe08ab9b5ff96ce11bdff3fe40b114b9813bc6801261e2a7b45e20012"},
{file = "frozendict-2.3.2.tar.gz", hash = "sha256:7fac4542f0a13fbe704db4942f41ba3abffec5af8b100025973e59dff6a09d0d"},
]
gitdb = [
{file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"},
@@ -2055,8 +2055,8 @@ matrix-common = [
{file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
]
matrix-synapse-ldap3 = [
{file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"},
{file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
{file = "matrix-synapse-ldap3-0.2.1.tar.gz", hash = "sha256:bfb4390f4a262ffb0d6f057ff3aeb1e46d4e52ff420a064d795fb4f555f00285"},
{file = "matrix_synapse_ldap3-0.2.1-py3-none-any.whl", hash = "sha256:1b3310a60f1d06466f35905a269b6df95747fd1305f2b7fe638f373963b2aa2c"},
]
mccabe = [
{file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},


@@ -54,7 +54,7 @@ skip_gitignore = true
[tool.poetry]
name = "matrix-synapse"
version = "1.65.0"
version = "1.63.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -110,9 +110,7 @@ jsonschema = ">=3.0.0"
frozendict = ">=1,!=2.1.2"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
# We require 1.5.0 to work around an issue when running against the C implementation of
# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36
canonicaljson = "^1.5.0"
canonicaljson = "^1.4.0"
# we use the type definitions added in signedjson 1.1.
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.


@@ -26,6 +26,7 @@ DISTS = (
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:impish", # 21.10 (EOL 2022-07)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
)


@@ -101,7 +101,6 @@ if [ -z "$skip_docker_build" ]; then
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
docker build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"


@@ -32,7 +32,6 @@ import click
import commonmark
import git
from click.exceptions import ClickException
from git import GitCommandError, Repo
from github import Github
from packaging import version
@@ -56,12 +55,9 @@ def run_until_successful(
def cli() -> None:
"""An interactive script to walk through the parts of creating a release.
Requirements:
- The dev dependencies be installed, which can be done via:
Requires the dev dependencies be installed, which can be done via:
pip install -e .[dev]
- A checkout of the sytest repository at ../sytest
pip install -e .[dev]
Then to use:
@@ -79,8 +75,6 @@ def cli() -> None:
# Optional: generate some nice links for the announcement
./scripts-dev/release.py merge-back
./scripts-dev/release.py announce
If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
@@ -95,12 +89,10 @@ def prepare() -> None:
"""
# Make sure we're in a git repo.
synapse_repo = get_repo_and_check_clean_checkout()
sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
repo = get_repo_and_check_clean_checkout()
click.secho("Updating Synapse and Sytest git repos...")
synapse_repo.remote().fetch()
sytest_repo.remote().fetch()
click.secho("Updating git repo...")
repo.remote().fetch()
# Get the current version and AST from root Synapse module.
current_version = get_package_version()
@@ -174,12 +166,12 @@ def prepare() -> None:
assert not parsed_new_version.is_postrelease
release_branch_name = get_release_branch_name(parsed_new_version)
release_branch = find_ref(synapse_repo, release_branch_name)
release_branch = find_ref(repo, release_branch_name)
if release_branch:
if release_branch.is_remote():
# If the release branch only exists on the remote we check it out
# locally.
synapse_repo.git.checkout(release_branch_name)
repo.git.checkout(release_branch_name)
else:
# If a branch doesn't exist we create one. We ask which one branch it
# should be based off, defaulting to sensible values depending on the
@@ -195,34 +187,25 @@ def prepare() -> None:
"Which branch should the release be based on?", default=default
)
for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
base_branch = find_ref(repo, branch_name)
if not base_branch:
print(f"Could not find base branch {branch_name} for {repo_name}!")
click.get_current_context().abort()
base_branch = find_ref(repo, branch_name)
if not base_branch:
print(f"Could not find base branch {branch_name}!")
click.get_current_context().abort()
# Check out the base branch and ensure it's up to date
repo.head.set_reference(
base_branch, f"check out the base branch for {repo_name}"
)
repo.head.reset(index=True, working_tree=True)
if not base_branch.is_remote():
update_branch(repo)
# Check out the base branch and ensure it's up to date
repo.head.set_reference(base_branch, "check out the base branch")
repo.head.reset(index=True, working_tree=True)
if not base_branch.is_remote():
update_branch(repo)
# Create the new release branch
# Type ignore will no longer be needed after GitPython 3.1.28.
# See https://github.com/gitpython-developers/GitPython/pull/1419
repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
# Special-case SyTest: we don't actually prepare any files so we may
# as well push it now (and only when we create a release branch;
# not on subsequent RCs or full releases).
if click.confirm("Push new SyTest branch?", default=True):
sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)
# Create the new release branch
# Type ignore will no longer be needed after GitPython 3.1.28.
# See https://github.com/gitpython-developers/GitPython/pull/1419
repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
# Switch to the release branch and ensure it's up to date.
synapse_repo.git.checkout(release_branch_name)
update_branch(synapse_repo)
repo.git.checkout(release_branch_name)
update_branch(repo)
# Update the version specified in pyproject.toml.
subprocess.check_output(["poetry", "version", new_version])
@@ -247,15 +230,15 @@ def prepare() -> None:
run_until_successful('dch -M -r -D stable ""', shell=True)
# Show the user the changes and ask if they want to edit the change log.
synapse_repo.git.add("-u")
repo.git.add("-u")
subprocess.run("git diff --cached", shell=True)
if click.confirm("Edit changelog?", default=False):
click.edit(filename="CHANGES.md")
# Commit the changes.
synapse_repo.git.add("-u")
synapse_repo.git.commit("-m", new_version)
repo.git.add("-u")
repo.git.commit("-m", new_version)
# We give the option to bail here in case the user wants to make sure things
# are OK before pushing.
@@ -263,21 +246,17 @@ def prepare() -> None:
print("")
print("Run when ready to push:")
print("")
print(
f"\tgit push -u {synapse_repo.remote().name} {synapse_repo.active_branch.name}"
)
print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}")
print("")
sys.exit(0)
# Otherwise, push and open the changelog in the browser.
synapse_repo.git.push(
"-u", synapse_repo.remote().name, synapse_repo.active_branch.name
)
repo.git.push("-u", repo.remote().name, repo.active_branch.name)
print("Opening the changelog in your browser...")
print("Please ask others to give it a check.")
click.launch(
f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md"
f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md"
)
@@ -444,79 +423,6 @@ def upload() -> None:
)
def _merge_into(repo: Repo, source: str, target: str) -> None:
"""
Merges branch `source` into branch `target`.
Pulls both before merging and pushes the result.
"""
# Update our branches and switch to the target branch
for branch in [source, target]:
click.echo(f"Switching to {branch} and pulling...")
repo.heads[branch].checkout()
# Pull so we're up to date
repo.remote().pull()
assert repo.active_branch.name == target
try:
# TODO This seemed easier than using GitPython directly
click.echo(f"Merging {source}...")
repo.git.merge(source)
except GitCommandError as exc:
# If a merge conflict occurs, give some context and try to
# make it easy to abort if necessary.
click.echo(exc)
if not click.confirm(
f"Likely merge conflict whilst merging ({source} → {target}). "
f"Have you resolved it?"
):
repo.git.merge("--abort")
return
# Push result.
click.echo("Pushing...")
repo.remote().push()
@cli.command()
def merge_back() -> None:
"""Merge the release branch back into the appropriate branches.
All branches will be automatically pulled from the remote and the results
will be pushed to the remote."""
synapse_repo = get_repo_and_check_clean_checkout()
branch_name = synapse_repo.active_branch.name
if not branch_name.startswith("release-v"):
raise RuntimeError("Not on a release branch. This does not seem sensible.")
# Pull so we're up to date
synapse_repo.remote().pull()
current_version = get_package_version()
if current_version.is_prerelease:
# Release candidate
if click.confirm(f"Merge {branch_name} → develop?", default=True):
_merge_into(synapse_repo, branch_name, "develop")
else:
# Full release
sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
if click.confirm(f"Merge {branch_name} → master?", default=True):
_merge_into(synapse_repo, branch_name, "master")
if click.confirm("Merge master → develop?", default=True):
_merge_into(synapse_repo, "master", "develop")
if click.confirm(f"On SyTest, merge {branch_name} → master?", default=True):
_merge_into(sytest_repo, branch_name, "master")
if click.confirm("On SyTest, merge master → develop?", default=True):
_merge_into(sytest_repo, "master", "develop")
@cli.command()
def announce() -> None:
"""Generate markdown to announce the release."""
@@ -563,18 +469,14 @@ def get_release_branch_name(version_number: version.Version) -> str:
return f"release-v{version_number.major}.{version_number.minor}"
def get_repo_and_check_clean_checkout(
path: str = ".", name: str = "synapse"
) -> git.Repo:
def get_repo_and_check_clean_checkout() -> git.Repo:
"""Get the project repo and check it's not got any uncommitted changes."""
try:
repo = git.Repo(path=path)
repo = git.Repo()
except git.InvalidGitRepositoryError:
raise click.ClickException(
f"{path} is not a git repository (expecting a {name} repository)."
)
raise click.ClickException("Not in Synapse repo.")
if repo.is_dirty():
raise click.ClickException(f"Uncommitted changes exist in {path}.")
raise click.ClickException("Uncommitted changes exist.")
return repo


@@ -33,7 +33,7 @@ def main() -> None:
parser.add_argument(
"--report-stats",
action="store",
help="Whether the generated config reports homeserver usage statistics",
help="Whether the generated config reports anonymized usage statistics",
choices=["yes", "no"],
)


@@ -26,17 +26,11 @@ from synapse.api.errors import (
Codes,
InvalidClientTokenError,
MissingClientTokenError,
UnstableSpecAuthError,
)
from synapse.appservice import ApplicationService
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import (
active_span,
force_tracing,
start_active_span,
trace,
)
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import Requester, UserID, create_requester
@@ -112,11 +106,8 @@ class Auth:
forgot = await self.store.did_forget(user_id, room_id)
if not forgot:
return membership, member_event_id
raise UnstableSpecAuthError(
403,
"User %s not in room %s" % (user_id, room_id),
errcode=Codes.NOT_JOINED,
)
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
async def get_user_by_req(
self,
@@ -572,7 +563,6 @@ class Auth:
return query_params[0].decode("ascii")
@trace
async def check_user_in_room_or_world_readable(
self, room_id: str, user_id: str, allow_departed_users: bool = False
) -> Tuple[str, Optional[str]]:
@@ -610,9 +600,8 @@ class Auth:
== HistoryVisibility.WORLD_READABLE
):
return Membership.JOIN, None
raise UnstableSpecAuthError(
raise AuthError(
403,
"User %s not in room %s, and room previews are disabled"
% (user_id, room_id),
errcode=Codes.NOT_JOINED,
)


@@ -257,8 +257,7 @@ class GuestAccess:
class ReceiptTypes:
READ: Final = "m.read"
READ_PRIVATE: Final = "m.read.private"
UNSTABLE_READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
FULLY_READ: Final = "m.fully_read"
@@ -269,4 +268,4 @@ class PublicRoomsFilterFields:
"""
GENERIC_SEARCH_TERM: Final = "generic_search_term"
ROOM_TYPES: Final = "room_types"
ROOM_TYPES: Final = "org.matrix.msc3827.room_types"


@@ -26,7 +26,6 @@ from twisted.web import http
from synapse.util import json_decoder
if typing.TYPE_CHECKING:
from synapse.config.homeserver import HomeServerConfig
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@@ -81,12 +80,6 @@ class Codes(str, Enum):
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
# Part of MSC3848
# https://github.com/matrix-org/matrix-spec-proposals/pull/3848
ALREADY_JOINED = "ORG.MATRIX.MSC3848.ALREADY_JOINED"
NOT_JOINED = "ORG.MATRIX.MSC3848.NOT_JOINED"
INSUFFICIENT_POWER = "ORG.MATRIX.MSC3848.INSUFFICIENT_POWER"
# The account has been suspended on the server.
# By opposition to `USER_DEACTIVATED`, this is a reversible measure
# that can possibly be appealed and reverted.
@@ -174,7 +167,7 @@ class SynapseError(CodeMessageException):
else:
self._additional_fields = dict(additional_fields)
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, **self._additional_fields)
@@ -220,7 +213,7 @@ class ConsentNotGivenError(SynapseError):
)
self._consent_uri = consent_uri
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
@@ -314,37 +307,6 @@ class AuthError(SynapseError):
super().__init__(code, msg, errcode, additional_fields)
class UnstableSpecAuthError(AuthError):
"""An error raised when a new error code is being proposed to replace a previous one.
This error will return a "org.matrix.unstable.errcode" property with the new error code,
with the previous error code still being defined in the "errcode" property.
This error will include `org.matrix.msc3848.unstable.errcode` in the C-S error body.
"""
def __init__(
self,
code: int,
msg: str,
errcode: str,
previous_errcode: str = Codes.FORBIDDEN,
additional_fields: Optional[dict] = None,
):
self.previous_errcode = previous_errcode
super().__init__(code, msg, errcode, additional_fields)
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
fields = {}
if config is not None and config.experimental.msc3848_enabled:
fields["org.matrix.msc3848.unstable.errcode"] = self.errcode
return cs_error(
self.msg,
self.previous_errcode,
**fields,
**self._additional_fields,
)
class InvalidClientCredentialsError(SynapseError):
"""An error raised when there was a problem with the authorisation credentials
in a client request.
@@ -376,8 +338,8 @@ class InvalidClientTokenError(InvalidClientCredentialsError):
super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
self._soft_logout = soft_logout
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
d = super().error_dict(config)
def error_dict(self) -> "JsonDict":
d = super().error_dict()
d["soft_logout"] = self._soft_logout
return d
@@ -400,7 +362,7 @@ class ResourceLimitError(SynapseError):
self.limit_type = limit_type
super().__init__(code, msg, errcode=errcode)
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(
self.msg,
self.errcode,
@@ -435,7 +397,7 @@ class InvalidCaptchaError(SynapseError):
super().__init__(code, msg, errcode)
self.error_url = error_url
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, error_url=self.error_url)
@@ -452,7 +414,7 @@ class LimitExceededError(SynapseError):
super().__init__(code, msg, errcode)
self.retry_after_ms = retry_after_ms
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)
@@ -467,7 +429,7 @@ class RoomKeysVersionError(SynapseError):
super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION)
self.current_version = current_version
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, current_version=self.current_version)
@@ -507,7 +469,7 @@ class IncompatibleRoomVersionError(SynapseError):
self._room_version = room_version
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, room_version=self._room_version)
@@ -553,7 +515,7 @@ class UnredactedContentDeletedError(SynapseError):
)
self.content_keep_ms = content_keep_ms
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
def error_dict(self) -> "JsonDict":
extra = {}
if self.content_keep_ms is not None:
extra = {"fi.mau.msc2815.content_keep_ms": self.content_keep_ms}


@@ -17,7 +17,7 @@ from collections import OrderedDict
from typing import Hashable, Optional, Tuple
from synapse.api.errors import LimitExceededError
from synapse.config.ratelimiting import RatelimitSettings
from synapse.config.ratelimiting import RateLimitConfig
from synapse.storage.databases.main import DataStore
from synapse.types import Requester
from synapse.util import Clock
@@ -314,8 +314,8 @@ class RequestRatelimiter:
self,
store: DataStore,
clock: Clock,
rc_message: RatelimitSettings,
rc_admin_redaction: Optional[RatelimitSettings],
rc_message: RateLimitConfig,
rc_admin_redaction: Optional[RateLimitConfig],
):
self.store = store
self.clock = clock


@@ -28,22 +28,19 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.appservice import (
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +49,16 @@ logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
SlavedReceiptsStore,
SlavedAccountDataStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedFilteringStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
AccountDataWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
RegistrationWorkerStore,
ReceiptsWorkerStore,
BaseSlavedStore,
RoomWorkerStore,
):
def __init__(


@@ -48,12 +48,20 @@ from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
@@ -92,15 +100,8 @@ from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.appservice import (
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
@@ -109,15 +110,11 @@ from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
@@ -230,11 +227,11 @@ class GenericWorkerSlavedStore(
UIAuthWorkerStore,
EndToEndRoomKeyStore,
PresenceStore,
DeviceInboxWorkerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
TagsWorkerStore,
AccountDataWorkerStore,
SlavedAccountDataStore,
SlavedPusherStore,
CensorEventsStore,
ClientIpWorkerStore,
@@ -242,20 +239,19 @@ class GenericWorkerSlavedStore(
SlavedKeyStore,
RoomWorkerStore,
RoomBatchStore,
DirectoryWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
ProfileWorkerStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedProfileStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
ReceiptsWorkerStore,
RegistrationWorkerStore,
SearchStore,
TransactionWorkerStore,
LockStore,
SessionStore,
BaseSlavedStore,
):
# Properties that multiple storage classes define. Tell mypy what the
# expected type is.


@@ -44,7 +44,6 @@ from synapse.app._base import (
register_start,
)
from synapse.config._base import ConfigError, format_config_error
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
@@ -202,7 +201,7 @@ class SynapseHomeServer(HomeServer):
}
)
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
if self.config.email.can_verify_email:
from synapse.rest.synapse.client.password_reset import (
PasswordResetSubmitTokenResource,
)


@@ -97,16 +97,16 @@ def format_config_error(e: ConfigError) -> Iterator[str]:
# We split these messages out to allow packages to override with package
# specific instructions.
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
Please opt in or out of reporting homeserver usage statistics, by setting
the `report_stats` key in your config file to either True or False.
Please opt in or out of reporting anonymized homeserver usage statistics, by
setting the `report_stats` key in your config file to either True or False.
"""
MISSING_REPORT_STATS_SPIEL = """\
We would really appreciate it if you could help our project out by reporting
homeserver usage statistics from your homeserver. Your homeserver's server name,
along with very basic aggregate data (e.g. number of users) will be reported. But
it helps us to track the growth of the Matrix community, and helps us to make Matrix
a success, as well as to convince other networks that they should peer with us.
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.
Thank you.
"""
@@ -621,7 +621,7 @@ class RootConfig:
generate_group.add_argument(
"--report-stats",
action="store",
help="Whether the generated config reports homeserver usage statistics.",
help="Whether the generated config reports anonymized usage statistics.",
choices=["yes", "no"],
)
generate_group.add_argument(


@@ -18,7 +18,6 @@
import email.utils
import logging
import os
from enum import Enum
from typing import Any
import attr
@@ -86,19 +85,14 @@ class EmailConfig(Config):
if email_config is None:
email_config = {}
self.force_tls = email_config.get("force_tls", False)
self.email_smtp_host = email_config.get("smtp_host", "localhost")
self.email_smtp_port = email_config.get(
"smtp_port", 465 if self.force_tls else 25
)
self.email_smtp_port = email_config.get("smtp_port", 25)
self.email_smtp_user = email_config.get("smtp_user", None)
self.email_smtp_pass = email_config.get("smtp_pass", None)
self.require_transport_security = email_config.get(
"require_transport_security", False
)
self.enable_smtp_tls = email_config.get("enable_tls", True)
if self.force_tls and not self.enable_smtp_tls:
raise ConfigError("email.force_tls requires email.enable_tls to be true")
if self.require_transport_security and not self.enable_smtp_tls:
raise ConfigError(
"email.require_transport_security requires email.enable_tls to be true"
@@ -136,40 +130,22 @@ class EmailConfig(Config):
self.email_enable_notifs = email_config.get("enable_notifs", False)
self.threepid_behaviour_email = (
# Have Synapse handle the email sending if account_threepid_delegates.email
# is not defined
# msisdn is currently always remote while Synapse does not support any method of
# sending SMS messages
ThreepidBehaviour.REMOTE
if self.root.registration.account_threepid_delegate_email
else ThreepidBehaviour.LOCAL
)
if config.get("trust_identity_server_for_password_resets"):
raise ConfigError(
'The config option "trust_identity_server_for_password_resets" has been removed.'
"Please consult the configuration manual at docs/usage/configuration/config_documentation.md for "
"details and update your config file."
'The config option "trust_identity_server_for_password_resets" '
"is no longer supported. Please remove it from the config file."
)
self.local_threepid_handling_disabled_due_to_email_config = False
if (
self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
and email_config == {}
):
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
self.local_threepid_handling_disabled_due_to_email_config = True
self.threepid_behaviour_email = ThreepidBehaviour.OFF
# If we have email config settings, assume that we can verify ownership of
# email addresses.
self.can_verify_email = email_config != {}
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
email_config.get("validation_token_lifetime", "1h")
)
if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
if self.can_verify_email:
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
@@ -360,18 +336,3 @@ class EmailConfig(Config):
"Config option email.invite_client_location must be a http or https URL",
path=("email", "invite_client_location"),
)
class ThreepidBehaviour(Enum):
"""
Enum to define the behaviour of Synapse with regards to when it contacts an identity
server for 3pid registration and password resets
REMOTE = use an external server to send tokens
LOCAL = send tokens ourselves
OFF = disable registration via 3pid and password resets
"""
REMOTE = "remote"
LOCAL = "local"
OFF = "off"


@@ -32,7 +32,7 @@ class ExperimentalConfig(Config):
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
# MSC2285 (unstable private read receipts)
# MSC2285 (private read receipts)
self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
# MSC3244 (room version capabilities)
@@ -88,5 +88,5 @@ class ExperimentalConfig(Config):
# MSC3715: dir param on /relations.
self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
# MSC3848: Introduce errcodes for specific event sending failures
self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
# MSC3827: Filtering of /publicRooms by room type
self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False)

View File

@@ -21,7 +21,7 @@ from synapse.types import JsonDict
from ._base import Config
class RatelimitSettings:
class RateLimitConfig:
def __init__(
self,
config: Dict[str, float],
@@ -34,7 +34,7 @@ class RatelimitSettings:
@attr.s(auto_attribs=True)
class FederationRatelimitSettings:
class FederationRateLimitConfig:
window_size: int = 1000
sleep_limit: int = 10
sleep_delay: int = 500
@@ -50,11 +50,11 @@ class RatelimitConfig(Config):
# Load the new-style messages config if it exists. Otherwise fall back
# to the old method.
if "rc_message" in config:
self.rc_message = RatelimitSettings(
self.rc_message = RateLimitConfig(
config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
)
else:
self.rc_message = RatelimitSettings(
self.rc_message = RateLimitConfig(
{
"per_second": config.get("rc_messages_per_second", 0.2),
"burst_count": config.get("rc_message_burst_count", 10.0),
@@ -64,9 +64,9 @@ class RatelimitConfig(Config):
# Load the new-style federation config, if it exists. Otherwise, fall
# back to the old method.
if "rc_federation" in config:
self.rc_federation = FederationRatelimitSettings(**config["rc_federation"])
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
else:
self.rc_federation = FederationRatelimitSettings(
self.rc_federation = FederationRateLimitConfig(
**{
k: v
for k, v in {
@@ -80,17 +80,17 @@ class RatelimitConfig(Config):
}
)
self.rc_registration = RatelimitSettings(config.get("rc_registration", {}))
self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
self.rc_registration_token_validity = RatelimitSettings(
self.rc_registration_token_validity = RateLimitConfig(
config.get("rc_registration_token_validity", {}),
defaults={"per_second": 0.1, "burst_count": 5},
)
rc_login_config = config.get("rc_login", {})
self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {}))
self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {}))
self.rc_login_failed_attempts = RatelimitSettings(
self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
self.rc_login_failed_attempts = RateLimitConfig(
rc_login_config.get("failed_attempts", {})
)
@@ -101,54 +101,47 @@ class RatelimitConfig(Config):
rc_admin_redaction = config.get("rc_admin_redaction")
self.rc_admin_redaction = None
if rc_admin_redaction:
self.rc_admin_redaction = RatelimitSettings(rc_admin_redaction)
self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
self.rc_joins_local = RatelimitSettings(
self.rc_joins_local = RateLimitConfig(
config.get("rc_joins", {}).get("local", {}),
defaults={"per_second": 0.1, "burst_count": 10},
)
self.rc_joins_remote = RatelimitSettings(
self.rc_joins_remote = RateLimitConfig(
config.get("rc_joins", {}).get("remote", {}),
defaults={"per_second": 0.01, "burst_count": 10},
)
# Track the rate of joins to a given room. If there are too many, temporarily
# prevent local joins and remote joins via this server.
self.rc_joins_per_room = RatelimitSettings(
config.get("rc_joins_per_room", {}),
defaults={"per_second": 1, "burst_count": 10},
)
# Ratelimit cross-user key requests:
# * For local requests this is keyed by the sending device.
# * For requests received over federation this is keyed by the origin.
#
# Note that this isn't exposed in the configuration as it is obscure.
self.rc_key_requests = RatelimitSettings(
self.rc_key_requests = RateLimitConfig(
config.get("rc_key_requests", {}),
defaults={"per_second": 20, "burst_count": 100},
)
self.rc_3pid_validation = RatelimitSettings(
self.rc_3pid_validation = RateLimitConfig(
config.get("rc_3pid_validation") or {},
defaults={"per_second": 0.003, "burst_count": 5},
)
self.rc_invites_per_room = RatelimitSettings(
self.rc_invites_per_room = RateLimitConfig(
config.get("rc_invites", {}).get("per_room", {}),
defaults={"per_second": 0.3, "burst_count": 10},
)
self.rc_invites_per_user = RatelimitSettings(
self.rc_invites_per_user = RateLimitConfig(
config.get("rc_invites", {}).get("per_user", {}),
defaults={"per_second": 0.003, "burst_count": 5},
)
self.rc_invites_per_issuer = RatelimitSettings(
self.rc_invites_per_issuer = RateLimitConfig(
config.get("rc_invites", {}).get("per_issuer", {}),
defaults={"per_second": 0.3, "burst_count": 10},
)
self.rc_third_party_invite = RatelimitSettings(
self.rc_third_party_invite = RateLimitConfig(
config.get("rc_third_party_invite", {}),
defaults={
"per_second": self.rc_message.per_second,

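The hunk above toggles between two names for the same defaults-aware settings object. A minimal sketch of the pattern, with illustrative names (the real class lives in synapse.config.ratelimiting and takes the same per_second/burst_count shape):

from typing import Mapping, Optional

class RatelimitSettingsSketch:
    """Illustrative stand-in for the settings class used above."""
    def __init__(self, config: Mapping, defaults: Optional[Mapping] = None):
        defaults = defaults or {"per_second": 0.17, "burst_count": 3.0}
        # Missing keys fall back to the supplied defaults.
        self.per_second: float = config.get("per_second", defaults["per_second"])
        self.burst_count: float = config.get("burst_count", defaults["burst_count"])

# The new-style block wins; otherwise fall back to the old flat keys.
config = {"rc_messages_per_second": 0.5}
if "rc_message" in config:
    rc_message = RatelimitSettingsSketch(
        config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
    )
else:
    rc_message = RatelimitSettingsSketch({
        "per_second": config.get("rc_messages_per_second", 0.2),
        "burst_count": config.get("rc_message_burst_count", 10.0),
    })
assert rc_message.per_second == 0.5
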
View File

@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from typing import Any, Optional
from synapse.api.constants import RoomCreationPreset
@@ -21,15 +20,11 @@ from synapse.config._base import Config, ConfigError
from synapse.types import JsonDict, RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols, strtobool
logger = logging.getLogger(__name__)
LEGACY_EMAIL_DELEGATE_WARNING = """\
Delegation of email verification to an identity server is now deprecated. To
NO_EMAIL_DELEGATE_ERROR = """\
Delegation of email verification to an identity server is no longer supported. To
continue to allow users to add email addresses to their accounts, and use them for
password resets, configure Synapse with an SMTP server via the `email` setting, and
remove `account_threepid_delegates.email`.
This will be an error in a future version.
"""
@@ -64,9 +59,8 @@ class RegistrationConfig(Config):
account_threepid_delegates = config.get("account_threepid_delegates") or {}
if "email" in account_threepid_delegates:
logger.warning(LEGACY_EMAIL_DELEGATE_WARNING)
self.account_threepid_delegate_email = account_threepid_delegates.get("email")
raise ConfigError(NO_EMAIL_DELEGATE_ERROR)
# self.account_threepid_delegate_email = account_threepid_delegates.get("email")
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)

View File

@@ -42,18 +42,6 @@ THUMBNAIL_SIZE_YAML = """\
# method: %(method)s
"""
# A map from the given media type to the type of thumbnail we should generate
# for it.
THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP = {
"image/jpeg": "jpeg",
"image/jpg": "jpeg",
"image/webp": "jpeg",
# Thumbnails can only be jpeg or png. We choose png thumbnails for gif
# because it can have transparency.
"image/gif": "png",
"image/png": "png",
}
HTTP_PROXY_SET_WARNING = """\
The Synapse config url_preview_ip_range_blacklist will be ignored as an HTTP(s) proxy is configured."""
@@ -91,22 +79,13 @@ def parse_thumbnail_requirements(
width = size["width"]
height = size["height"]
method = size["method"]
for format, thumbnail_format in THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.items():
requirement = requirements.setdefault(format, [])
if thumbnail_format == "jpeg":
requirement.append(
ThumbnailRequirement(width, height, method, "image/jpeg")
)
elif thumbnail_format == "png":
requirement.append(
ThumbnailRequirement(width, height, method, "image/png")
)
else:
raise Exception(
"Unknown thumbnail mapping from %s to %s. This is a Synapse problem, please report!"
% (format, thumbnail_format)
)
jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg")
png_thumbnail = ThumbnailRequirement(width, height, method, "image/png")
requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail)
requirements.setdefault("image/jpg", []).append(jpeg_thumbnail)
requirements.setdefault("image/webp", []).append(jpeg_thumbnail)
requirements.setdefault("image/gif", []).append(png_thumbnail)
requirements.setdefault("image/png", []).append(png_thumbnail)
return {
media_type: tuple(thumbnails) for media_type, thumbnails in requirements.items()
}

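One side of the hunk above drives thumbnail-requirement generation from a single media-type map instead of five hand-written setdefault calls. A rough, self-contained sketch of that approach:

from collections import namedtuple

ThumbnailRequirement = namedtuple(
    "ThumbnailRequirement", ["width", "height", "method", "media_type"]
)

# Source media type -> thumbnail format. Thumbnails may only be jpeg or png;
# png is chosen for gif so transparency survives.
MEDIA_FORMAT_MAP = {
    "image/jpeg": "jpeg",
    "image/jpg": "jpeg",
    "image/webp": "jpeg",
    "image/gif": "png",
    "image/png": "png",
}

def parse_requirements(sizes):
    requirements = {}
    for size in sizes:
        for media_type, fmt in MEDIA_FORMAT_MAP.items():
            requirements.setdefault(media_type, []).append(
                ThumbnailRequirement(
                    size["width"], size["height"], size["method"], f"image/{fmt}"
                )
            )
    return {mt: tuple(reqs) for mt, reqs in requirements.items()}

reqs = parse_requirements([{"width": 32, "height": 32, "method": "crop"}])
assert reqs["image/gif"][0].media_type == "image/png"
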
View File

@@ -30,13 +30,7 @@ from synapse.api.constants import (
JoinRules,
Membership,
)
from synapse.api.errors import (
AuthError,
Codes,
EventSizeError,
SynapseError,
UnstableSpecAuthError,
)
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
@@ -297,11 +291,7 @@ def check_state_dependent_auth_rules(
invite_level = get_named_level(auth_dict, "invite", 0)
if user_level < invite_level:
raise UnstableSpecAuthError(
403,
"You don't have permission to invite users",
errcode=Codes.INSUFFICIENT_POWER,
)
raise AuthError(403, "You don't have permission to invite users")
else:
logger.debug("Allowing! %s", event)
return
@@ -484,11 +474,7 @@ def _is_membership_change_allowed(
return
if not caller_in_room: # caller isn't joined
raise UnstableSpecAuthError(
403,
"%s not in room %s." % (event.user_id, event.room_id),
errcode=Codes.NOT_JOINED,
)
raise AuthError(403, "%s not in room %s." % (event.user_id, event.room_id))
if Membership.INVITE == membership:
# TODO (erikj): We should probably handle this more intelligently
@@ -498,18 +484,10 @@ def _is_membership_change_allowed(
if target_banned:
raise AuthError(403, "%s is banned from the room" % (target_user_id,))
elif target_in_room: # the target is already in the room.
raise UnstableSpecAuthError(
403,
"%s is already in the room." % target_user_id,
errcode=Codes.ALREADY_JOINED,
)
raise AuthError(403, "%s is already in the room." % target_user_id)
else:
if user_level < invite_level:
raise UnstableSpecAuthError(
403,
"You don't have permission to invite users",
errcode=Codes.INSUFFICIENT_POWER,
)
raise AuthError(403, "You don't have permission to invite users")
elif Membership.JOIN == membership:
# Joins are valid iff caller == target and:
# * They are not banned.
@@ -571,27 +549,15 @@ def _is_membership_change_allowed(
elif Membership.LEAVE == membership:
# TODO (erikj): Implement kicks.
if target_banned and user_level < ban_level:
raise UnstableSpecAuthError(
403,
"You cannot unban user %s." % (target_user_id,),
errcode=Codes.INSUFFICIENT_POWER,
)
raise AuthError(403, "You cannot unban user %s." % (target_user_id,))
elif target_user_id != event.user_id:
kick_level = get_named_level(auth_events, "kick", 50)
if user_level < kick_level or user_level <= target_level:
raise UnstableSpecAuthError(
403,
"You cannot kick user %s." % target_user_id,
errcode=Codes.INSUFFICIENT_POWER,
)
raise AuthError(403, "You cannot kick user %s." % target_user_id)
elif Membership.BAN == membership:
if user_level < ban_level or user_level <= target_level:
raise UnstableSpecAuthError(
403,
"You don't have permission to ban",
errcode=Codes.INSUFFICIENT_POWER,
)
raise AuthError(403, "You don't have permission to ban")
elif room_version.msc2403_knocking and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
not room_version.msc3787_knock_restricted_join_rule
@@ -601,11 +567,7 @@ def _is_membership_change_allowed(
elif target_user_id != event.user_id:
raise AuthError(403, "You cannot knock for other users")
elif target_in_room:
raise UnstableSpecAuthError(
403,
"You cannot knock on a room you are already in",
errcode=Codes.ALREADY_JOINED,
)
raise AuthError(403, "You cannot knock on a room you are already in")
elif caller_invited:
raise AuthError(403, "You are already invited to this room")
elif target_banned:
@@ -676,11 +638,10 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b
user_level = get_user_power_level(event.user_id, auth_events)
if user_level < send_level:
raise UnstableSpecAuthError(
raise AuthError(
403,
"You don't have permission to post that to the room. "
+ "user_level (%d) < send_level (%d)" % (user_level, send_level),
errcode=Codes.INSUFFICIENT_POWER,
)
# Check state_key
@@ -755,10 +716,9 @@ def check_historical(
historical_level = get_named_level(auth_events, "historical", 100)
if user_level < historical_level:
raise UnstableSpecAuthError(
raise AuthError(
403,
            'You don\'t have permission to send historical related events ("insertion", "batch", and "marker")',
errcode=Codes.INSUFFICIENT_POWER,
)

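The event-auth hunks above swap plain AuthErrors for UnstableSpecAuthErrors that carry a machine-readable errcode alongside the HTTP status and message. A hedged sketch of that error shape (the class name, fields, and errcode value here are illustrative, not Synapse's exact API):

class AuthErrorWithCodeSketch(Exception):
    """Illustrative: a 403 that also carries a machine-readable errcode."""
    def __init__(self, code: int, msg: str, errcode: str = "M_FORBIDDEN"):
        super().__init__(msg)
        self.code = code
        self.errcode = errcode

    def error_dict(self) -> dict:
        return {"errcode": self.errcode, "error": str(self)}

try:
    raise AuthErrorWithCodeSketch(
        403, "You don't have permission to invite users",
        errcode="M_INSUFFICIENT_POWER",  # hypothetical code value
    )
except AuthErrorWithCodeSketch as e:
    assert e.error_dict()["error"].endswith("invite users")
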
View File

@@ -24,11 +24,9 @@ from synapse.api.room_versions import (
RoomVersion,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.state import StateHandler
from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter
from synapse.types import EventID, JsonDict
from synapse.util import Clock
from synapse.util.stringutils import random_string
@@ -123,11 +121,7 @@ class EventBuilder:
"""
if auth_event_ids is None:
state_ids = await self._state.compute_state_after_events(
self.room_id,
prev_event_ids,
state_filter=StateFilter.from_types(
auth_types_for_event(self.room_version, self)
),
self.room_id, prev_event_ids
)
auth_event_ids = self._event_auth_handler.compute_auth_events(
self, state_ids

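The removed lines above narrowed the state lookup to just the (type, state_key) pairs needed to auth the event, rather than fetching the whole room state. A minimal sketch of such a type/state_key filter (not Synapse's actual StateFilter implementation):

from typing import Dict, Iterable, Optional, Set, Tuple

def filter_from_types(
    types: Iterable[Tuple[str, Optional[str]]]
) -> Dict[str, Optional[Set[str]]]:
    """None as a state_key means 'every state_key of this type'."""
    filter_map: Dict[str, Optional[Set[str]]] = {}
    for ev_type, state_key in types:
        if filter_map.get(ev_type, set()) is None:
            continue  # already a wildcard for this type
        if state_key is None:
            filter_map[ev_type] = None
        else:
            filter_map.setdefault(ev_type, set()).add(state_key)
    return filter_map

# e.g. only the state needed to auth an event from @alice:
f = filter_from_types([
    ("m.room.create", ""),
    ("m.room.power_levels", ""),
    ("m.room.member", "@alice:example.org"),
])
assert f["m.room.member"] == {"@alice:example.org"}
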
View File

@@ -11,10 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional, Tuple
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import attr
from frozendict import frozendict
from typing_extensions import Literal
from synapse.appservice import ApplicationService
from synapse.events import EventBase
@@ -32,7 +33,7 @@ class EventContext:
Holds information relevant to persisting an event
Attributes:
rejected: A rejection reason if the event was rejected, else None
rejected: A rejection reason if the event was rejected, else False
_state_group: The ID of the state group for this event. Note that state events
are persisted with a state group which includes the new event, so this is
@@ -84,7 +85,7 @@ class EventContext:
"""
_storage: "StorageControllers"
rejected: Optional[str] = None
rejected: Union[Literal[False], str] = False
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
_state_delta_due_to_event: Optional[StateMap[str]] = None

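The rejected change above is a typing refinement: Union[Literal[False], str] reads as "False, or a rejection reason", where the previous Optional[str] used None for the accepted case. A self-contained sketch of that attrs field:

from typing import Union

import attr
from typing_extensions import Literal

@attr.s(slots=True, frozen=True, auto_attribs=True)
class ContextSketch:
    # False => the event was accepted; a str => the rejection reason.
    rejected: Union[Literal[False], str] = False

assert ContextSketch().rejected is False
assert ContextSketch(rejected="auth_error").rejected == "auth_error"
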
View File

@@ -53,7 +53,7 @@ from synapse.api.room_versions import (
RoomVersion,
RoomVersions,
)
from synapse.events import EventBase, builder, make_event_from_dict
from synapse.events import EventBase, builder
from synapse.federation.federation_base import (
FederationBase,
InvalidEventSignatureError,
@@ -61,7 +61,6 @@ from synapse.federation.federation_base import (
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.types import QueryParams
from synapse.logging.opentracing import trace
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@@ -218,7 +217,7 @@ class FederationClient(FederationBase):
)
async def claim_client_keys(
self, destination: str, content: JsonDict, timeout: Optional[int]
self, destination: str, content: JsonDict, timeout: int
) -> JsonDict:
"""Claims one-time keys for a device hosted on a remote server.
@@ -234,7 +233,6 @@ class FederationClient(FederationBase):
destination, content, timeout
)
@trace
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> Optional[List[EventBase]]:
@@ -301,8 +299,7 @@ class FederationClient(FederationBase):
moving to the next destination. None indicates no timeout.
Returns:
A copy of the requested PDU that is safe to modify, or None if we
were unable to find it.
The requested PDU, or None if we were unable to find it.
Raises:
SynapseError, NotRetryingDestination, FederationDeniedError
@@ -312,7 +309,7 @@ class FederationClient(FederationBase):
)
logger.debug(
"get_pdu_from_destination_raw: retrieved event id %s from %s: %r",
"retrieved event id %s from %s: %r",
event_id,
destination,
transaction_data,
@@ -361,92 +358,54 @@ class FederationClient(FederationBase):
The requested PDU, or None if we were unable to find it.
"""
logger.debug(
"get_pdu: event_id=%s from destinations=%s", event_id, destinations
)
# TODO: Rate limit the number of times we try and get the same event.
# We might need the same event multiple times in quick succession (before
# it gets persisted to the database), so we cache the results of the lookup.
# Note that this is separate to the regular get_event cache which caches
# events once they have been persisted.
event = self._get_pdu_cache.get(event_id)
ev = self._get_pdu_cache.get(event_id)
if ev:
return ev
# If we don't see the event in the cache, go try to fetch it from the
# provided remote federated destinations
if not event:
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
for destination in destinations:
now = self._clock.time_msec()
last_attempt = pdu_attempts.get(destination, 0)
if last_attempt + PDU_RETRY_TIME_MS > now:
logger.debug(
"get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
destination,
last_attempt,
PDU_RETRY_TIME_MS,
now,
)
continue
signed_pdu = None
for destination in destinations:
now = self._clock.time_msec()
last_attempt = pdu_attempts.get(destination, 0)
if last_attempt + PDU_RETRY_TIME_MS > now:
continue
try:
event = await self.get_pdu_from_destination_raw(
destination=destination,
event_id=event_id,
room_version=room_version,
timeout=timeout,
)
try:
signed_pdu = await self.get_pdu_from_destination_raw(
destination=destination,
event_id=event_id,
room_version=room_version,
timeout=timeout,
)
pdu_attempts[destination] = now
pdu_attempts[destination] = now
if event:
# Prime the cache
self._get_pdu_cache[event.event_id] = event
except SynapseError as e:
logger.info(
"Failed to get PDU %s from %s because %s", event_id, destination, e
)
continue
except NotRetryingDestination as e:
logger.info(str(e))
continue
except FederationDeniedError as e:
logger.info(str(e))
continue
except Exception as e:
pdu_attempts[destination] = now
# Now that we have an event, we can break out of this
# loop and stop asking other destinations.
break
logger.info(
"Failed to get PDU %s from %s because %s", event_id, destination, e
)
continue
except SynapseError as e:
logger.info(
"Failed to get PDU %s from %s because %s",
event_id,
destination,
e,
)
continue
except NotRetryingDestination as e:
logger.info(str(e))
continue
except FederationDeniedError as e:
logger.info(str(e))
continue
except Exception as e:
pdu_attempts[destination] = now
if signed_pdu:
self._get_pdu_cache[event_id] = signed_pdu
logger.info(
"Failed to get PDU %s from %s because %s",
event_id,
destination,
e,
)
continue
if not event:
return None
# `event` now refers to an object stored in `get_pdu_cache`. Our
# callers may need to modify the returned object (eg to set
# `event.internal_metadata.outlier = true`), so we return a copy
# rather than the original object.
event_copy = make_event_from_dict(
event.get_pdu_json(),
event.room_version,
)
return event_copy
return signed_pdu
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
@@ -727,12 +686,6 @@ class FederationClient(FederationBase):
if failover_errcodes is None:
failover_errcodes = ()
if not destinations:
# Give a bit of a clearer message if no servers were specified at all.
raise SynapseError(
502, f"Failed to {description} via any server: No servers specified."
)
for destination in destinations:
if destination == self.server_name:
continue
@@ -782,7 +735,7 @@ class FederationClient(FederationBase):
"Failed to %s via %s", description, destination, exc_info=True
)
raise SynapseError(502, f"Failed to {description} via any server")
raise SynapseError(502, "Failed to %s via any server" % (description,))
async def make_membership_event(
self,

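The get_pdu hunk above caches fetched events but hands callers a copy, because callers may mutate the result (e.g. setting internal_metadata.outlier) and that mutation must not leak into the cache. A stripped-down sketch of the pattern, with the remote fetch stubbed out:

import copy
from typing import Callable, Dict, Optional

_pdu_cache: Dict[str, object] = {}

def get_pdu_sketch(event_id: str, fetch: Callable[[str], Optional[object]]):
    event = _pdu_cache.get(event_id)
    if event is None:
        # Cache miss: ask the remote destinations (stubbed as `fetch`).
        event = fetch(event_id)
        if event is None:
            return None
        _pdu_cache[event_id] = event
    # Never return the cached object itself; callers may mutate it.
    return copy.deepcopy(event)
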
View File

@@ -118,7 +118,6 @@ class FederationServer(FederationBase):
self._federation_event_handler = hs.get_federation_event_handler()
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self._room_member_handler = hs.get_room_member_handler()
self._state_storage_controller = hs.get_storage_controllers().state
@@ -469,7 +468,7 @@ class FederationServer(FederationBase):
)
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
pdu_results[event_id] = e.error_dict(self.hs.config)
pdu_results[event_id] = e.error_dict()
return
for pdu in pdus_by_room[room_id]:
@@ -622,15 +621,6 @@ class FederationServer(FederationBase):
)
raise IncompatibleRoomVersionError(room_version=room_version)
# Refuse the request if that room has seen too many joins recently.
# This is in addition to the HS-level rate limiting applied by
# BaseFederationServlet.
# type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?)
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
requester=None,
key=room_id,
update=False,
)
pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
return {"event": pdu.get_templated_pdu_json(), "room_version": room_version}
@@ -665,12 +655,6 @@ class FederationServer(FederationBase):
room_id: str,
caller_supports_partial_state: bool = False,
) -> Dict[str, Any]:
await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type]
requester=None,
key=room_id,
update=False,
)
event, context = await self._on_send_membership_event(
origin, content, Membership.JOIN, room_id
)
@@ -843,25 +827,8 @@ class FederationServer(FederationBase):
Codes.BAD_JSON,
)
# Note that get_room_version throws if the room does not exist here.
room_version = await self.store.get_room_version(room_id)
if await self.store.is_partial_state_room(room_id):
# If our server is still only partially joined, we can't give a complete
# response to /send_join, /send_knock or /send_leave.
# This is because we will not be able to provide the server list (for partial
# joins) or the full state (for full joins).
# Return a 404 as we would if we weren't in the room at all.
logger.info(
f"Rejecting /send_{membership_type} to %s because it's a partial state room",
room_id,
)
raise SynapseError(
404,
f"Unable to handle /send_{membership_type} right now; this server is not fully joined.",
errcode=Codes.NOT_FOUND,
)
if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
raise SynapseError(
403,

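The block above short-circuits /send_join, /send_knock and /send_leave while the room is only partially joined, since the server cannot yet produce the server list or full state those responses need. A hedged sketch of the guard, with the error class stubbed:

class SynapseErrorSketch(Exception):
    def __init__(self, code: int, msg: str, errcode: str = "M_UNKNOWN"):
        super().__init__(msg)
        self.code = code
        self.errcode = errcode

async def guard_membership(store, room_id: str, membership_type: str) -> None:
    # Behave as if we weren't in the room at all until the resync finishes.
    if await store.is_partial_state_room(room_id):
        raise SynapseErrorSketch(
            404,
            f"Unable to handle /send_{membership_type} right now; "
            "this server is not fully joined.",
            errcode="M_NOT_FOUND",
        )
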
View File

@@ -619,7 +619,7 @@ class TransportLayerClient:
)
async def claim_client_keys(
self, destination: str, query_content: JsonDict, timeout: Optional[int]
self, destination: str, query_content: JsonDict, timeout: int
) -> JsonDict:
"""Claim one-time keys for a list of devices hosted on a remote server.

View File

@@ -309,7 +309,7 @@ class BaseFederationServlet:
raise
# update the active opentracing span with the authenticated entity
set_tag("authenticated_entity", str(origin))
set_tag("authenticated_entity", origin)
# if the origin is authenticated and whitelisted, use its span context
# as the parent.

View File

@@ -565,7 +565,7 @@ class AuthHandler:
except LoginError as e:
# this step failed. Merge the error dict into the response
# so that the client can have another go.
errordict = e.error_dict(self.hs.config)
errordict = e.error_dict()
creds = await self.store.get_completed_ui_auth_stages(session.session_id)
for f in flows:

View File

@@ -118,8 +118,8 @@ class DeviceWorkerHandler:
ips = await self.store.get_last_client_ip_by_device(user_id, device_id)
_update_device_from_client_ips(device, ips)
set_tag("device", str(device))
set_tag("ips", str(ips))
set_tag("device", device)
set_tag("ips", ips)
return device
@@ -170,7 +170,7 @@ class DeviceWorkerHandler:
"""
set_tag("user_id", user_id)
set_tag("from_token", str(from_token))
set_tag("from_token", from_token)
now_room_key = self.store.get_room_max_token()
room_ids = await self.store.get_rooms_for_user(user_id)
@@ -795,7 +795,7 @@ class DeviceListUpdater:
"""
set_tag("origin", origin)
set_tag("edu_content", str(edu_content))
set_tag("edu_content", edu_content)
user_id = edu_content.pop("user_id")
device_id = edu_content.pop("device_id")
stream_id = str(edu_content.pop("stream_id")) # They may come as ints

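Several hunks above (and more in the e2e-keys handler below) wrap tag values in str(...): tracing tag values are expected to be primitives, so rich objects like device dicts and tokens get stringified up front. A tiny sketch of the idea:

def set_tag_sketch(span_tags: dict, key: str, value) -> None:
    # Coerce anything that isn't a primitive into a string first.
    if not isinstance(value, (bool, int, float, str)):
        value = str(value)
    span_tags[key] = value

tags: dict = {}
set_tag_sketch(tags, "ips", {"ip": "10.0.0.1", "last_seen": 12345})
assert isinstance(tags["ips"], str)
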
View File

@@ -15,7 +15,7 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple
import attr
from canonicaljson import encode_canonical_json
@@ -92,11 +92,7 @@ class E2eKeysHandler:
@trace
async def query_devices(
self,
query_body: JsonDict,
timeout: int,
from_user_id: str,
from_device_id: Optional[str],
self, query_body: JsonDict, timeout: int, from_user_id: str, from_device_id: str
) -> JsonDict:
"""Handle a device key query from a client
@@ -124,7 +120,9 @@ class E2eKeysHandler:
the number of in-flight queries at a time.
"""
async with self._query_devices_linearizer.queue((from_user_id, from_device_id)):
device_keys_query: Dict[str, List[str]] = query_body.get("device_keys", {})
device_keys_query: Dict[str, Iterable[str]] = query_body.get(
"device_keys", {}
)
# separate users by domain.
# make a map from domain to user_id to device_ids
@@ -138,8 +136,8 @@ class E2eKeysHandler:
else:
remote_queries[user_id] = device_ids
set_tag("local_key_query", str(local_query))
set_tag("remote_key_query", str(remote_queries))
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
# First get local devices.
# A map of destination -> failure response.
@@ -343,7 +341,7 @@ class E2eKeysHandler:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", str(failure))
set_tag("reason", failure)
return
@@ -394,7 +392,7 @@ class E2eKeysHandler:
@trace
async def query_local_devices(
self, query: Mapping[str, Optional[List[str]]]
self, query: Dict[str, Optional[List[str]]]
) -> Dict[str, Dict[str, dict]]:
"""Get E2E device keys for local users
@@ -405,7 +403,7 @@ class E2eKeysHandler:
Returns:
A map from user_id -> device_id -> device details
"""
set_tag("local_query", str(query))
set_tag("local_query", query)
local_query: List[Tuple[str, Optional[str]]] = []
result_dict: Dict[str, Dict[str, dict]] = {}
@@ -463,7 +461,7 @@ class E2eKeysHandler:
@trace
async def claim_one_time_keys(
self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int]
self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: int
) -> JsonDict:
local_query: List[Tuple[str, str, str]] = []
remote_queries: Dict[str, Dict[str, Dict[str, str]]] = {}
@@ -477,8 +475,8 @@ class E2eKeysHandler:
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = one_time_keys
set_tag("local_key_query", str(local_query))
set_tag("remote_key_query", str(remote_queries))
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
results = await self.store.claim_e2e_one_time_keys(local_query)
@@ -508,7 +506,7 @@ class E2eKeysHandler:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", str(failure))
set_tag("reason", failure)
await make_deferred_yieldable(
defer.gatherResults(
@@ -611,7 +609,7 @@ class E2eKeysHandler:
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
set_tag("one_time_key_counts", str(result))
set_tag("one_time_key_counts", result)
return {"one_time_key_counts": result}
async def _upload_one_time_keys_for_user(

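query_devices above splits the client's device_keys query into a local part and per-domain remote parts before fanning out over federation. A rough sketch of that split, with a crude stand-in for get_domain_from_id:

from typing import Dict, List, Tuple

def split_key_query(
    device_keys_query: Dict[str, List[str]], my_server_name: str
) -> Tuple[Dict[str, List[str]], Dict[str, Dict[str, List[str]]]]:
    local_query: Dict[str, List[str]] = {}
    remote_queries: Dict[str, Dict[str, List[str]]] = {}
    for user_id, device_ids in device_keys_query.items():
        domain = user_id.split(":", 1)[1]  # crude get_domain_from_id
        if domain == my_server_name:
            local_query[user_id] = device_ids
        else:
            remote_queries.setdefault(domain, {})[user_id] = device_ids
    return local_query, remote_queries

local, remote = split_key_query(
    {"@a:hs1": [], "@b:hs2": ["DEV1"]}, my_server_name="hs1"
)
assert "@a:hs1" in local and remote["hs2"]["@b:hs2"] == ["DEV1"]
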
View File

@@ -14,7 +14,7 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Optional, cast
from typing import TYPE_CHECKING, Dict, Optional
from typing_extensions import Literal
@@ -97,7 +97,7 @@ class E2eRoomKeysHandler:
user_id, version, room_id, session_id
)
log_kv(cast(JsonDict, results))
log_kv(results)
return results
@trace

View File

@@ -59,7 +59,6 @@ from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import NOT_SPAM
from synapse.replication.http.federation import (
@@ -181,7 +180,6 @@ class FederationHandler:
"resume_sync_partial_state_room", self._resume_sync_partial_state_room
)
@trace
async def maybe_backfill(
self, room_id: str, current_depth: int, limit: int
) -> bool:
@@ -548,9 +546,9 @@ class FederationHandler:
)
if ret.partial_state:
# Mark the room as having partial state.
# The background process is responsible for unmarking this flag,
# even if the join fails.
# TODO(faster_joins): roll this back if we don't manage to start the
# background resync (eg process_remote_join fails)
# https://github.com/matrix-org/synapse/issues/12998
await self.store.store_partial_state_room(room_id, ret.servers_in_room)
try:
@@ -576,21 +574,17 @@ class FederationHandler:
room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
finally:
# Always kick off the background process that asynchronously fetches
# state for the room.
# If the join failed, the background process is responsible for
# cleaning up — including unmarking the room as a partial state room.
if ret.partial_state:
# Kick off the process of asynchronously fetching the state for this
# room.
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
initial_destination=origin,
other_destinations=ret.servers_in_room,
room_id=room_id,
)
if ret.partial_state:
# Kick off the process of asynchronously fetching the state for this
# room.
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
initial_destination=origin,
other_destinations=ret.servers_in_room,
room_id=room_id,
)
# We wait here until this instance has seen the events come down
# replication (if we're using replication) as the below uses caches.
@@ -754,23 +748,6 @@ class FederationHandler:
# (and return a 404 otherwise)
room_version = await self.store.get_room_version(room_id)
if await self.store.is_partial_state_room(room_id):
# If our server is still only partially joined, we can't give a complete
# response to /make_join, so return a 404 as we would if we weren't in the
# room at all.
# The main reason we can't respond properly is that we need to know about
# the auth events for the join event that we would return.
# We also should not bother entertaining the /make_join since we cannot
# handle the /send_join.
logger.info(
"Rejecting /make_join to %s because it's a partial state room", room_id
)
raise SynapseError(
404,
"Unable to handle /make_join right now; this server is not fully joined.",
errcode=Codes.NOT_FOUND,
)
# now check that we are *still* in the room
is_in_room = await self._event_auth_handler.check_host_in_room(
room_id, self.server_name
@@ -1562,16 +1539,15 @@ class FederationHandler:
# Make an infinite iterator of destinations to try. Once we find a working
# destination, we'll stick with it until it flakes.
destinations: Collection[str]
if initial_destination is not None:
# Move `initial_destination` to the front of the list.
destinations = list(other_destinations)
if initial_destination in destinations:
destinations.remove(initial_destination)
destinations = [initial_destination] + destinations
destination_iter = itertools.cycle(destinations)
else:
destinations = other_destinations
destination_iter = itertools.cycle(destinations)
destination_iter = itertools.cycle(other_destinations)
# `destination` is the current remote homeserver we're pulling from.
destination = next(destination_iter)

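The resync loop above builds an infinite iterator of candidate homeservers, moving initial_destination to the front when it is known. The behaviour in isolation:

import itertools
from typing import Iterable, Iterator, Optional

def make_destination_iter(
    initial_destination: Optional[str], other_destinations: Iterable[str]
) -> Iterator[str]:
    if initial_destination is not None:
        # Put the server we joined via first; drop any duplicate of it.
        destinations = [initial_destination] + [
            d for d in other_destinations if d != initial_destination
        ]
    else:
        destinations = list(other_destinations)
    return itertools.cycle(destinations)

it = make_destination_iter("hs1", ["hs2", "hs1", "hs3"])
assert [next(it) for _ in range(4)] == ["hs1", "hs2", "hs3", "hs1"]
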
View File

@@ -59,7 +59,6 @@ from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.replication.http.federation import (
@@ -279,8 +278,7 @@ class FederationEventHandler:
)
try:
context = await self._state_handler.compute_event_context(pdu)
await self._process_received_pdu(origin, pdu, context)
await self._process_received_pdu(origin, pdu, state_ids=None)
except PartialStateConflictError:
# The room was un-partial stated while we were processing the PDU.
# Try once more, with full state this time.
@@ -288,8 +286,7 @@ class FederationEventHandler:
"Room %s was un-partial stated while processing the PDU, trying again.",
room_id,
)
context = await self._state_handler.compute_event_context(pdu)
await self._process_received_pdu(origin, pdu, context)
await self._process_received_pdu(origin, pdu, state_ids=None)
async def on_send_membership_event(
self, origin: str, event: EventBase
@@ -319,7 +316,6 @@ class FederationEventHandler:
The event and context of the event after inserting it into the room graph.
Raises:
RuntimeError if any prev_events are missing
SynapseError if the event is not accepted into the room
PartialStateConflictError if the room was un-partial stated in between
computing the state at the event and persisting it. The caller should
@@ -380,7 +376,7 @@ class FederationEventHandler:
# need to.
await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
await self._check_for_soft_fail(event, context=context, origin=origin)
await self._check_for_soft_fail(event, None, origin=origin)
await self._run_push_actions_and_persist_event(event, context)
return event, context
@@ -538,36 +534,32 @@ class FederationEventHandler:
#
# This is the same operation as we do when we receive a regular event
# over federation.
context = await self._compute_event_context_with_maybe_missing_prevs(
destination, event
state_ids = await self._resolve_state_at_missing_prevs(destination, event)
# build a new state group for it if need be
context = await self._state_handler.compute_event_context(
event,
state_ids_before_event=state_ids,
)
if context.partial_state:
# this can happen if some or all of the event's prev_events still have
# partial state. We were careful to only pick events from the db without
# partial-state prev events, so that implies that a prev event has
# been persisted (with partial state) since we did the query.
# partial state - ie, an event has an earlier stream_ordering than one
# or more of its prev_events, so we de-partial-state it before its
# prev_events.
#
# So, let's just ignore `event` for now; when we re-run the db query
# we should instead get its partial-state prev event, which we will
# de-partial-state, and then come back to event.
# TODO(faster_joins): we probably need to be more intelligent, and
# exclude partial-state prev_events from consideration
# https://github.com/matrix-org/synapse/issues/13001
logger.warning(
"%s still has prev_events with partial state: can't de-partial-state it yet",
"%s still has partial state: can't de-partial-state it yet",
event.event_id,
)
return
# since the state at this event has changed, we should now re-evaluate
# whether it should have been rejected. We must already have all of the
# auth events (from last time we went round this path), so there is no
# need to pass the origin.
await self._check_event_auth(None, event, context)
await self._store.update_state_for_partial_state_event(event, context)
self._state_storage_controller.notify_event_un_partial_stated(
event.event_id
)
@trace
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> None:
@@ -612,7 +604,6 @@ class FederationEventHandler:
backfilled=True,
)
@trace
async def _get_missing_events_for_pdu(
self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
) -> None:
@@ -713,7 +704,6 @@ class FederationEventHandler:
logger.info("Got %d prev_events", len(missing_events))
await self._process_pulled_events(origin, missing_events, backfilled=False)
@trace
async def _process_pulled_events(
self, origin: str, events: Iterable[EventBase], backfilled: bool
) -> None:
@@ -752,7 +742,6 @@ class FederationEventHandler:
with nested_logging_context(ev.event_id):
await self._process_pulled_event(origin, ev, backfilled=backfilled)
@trace
async def _process_pulled_event(
self, origin: str, event: EventBase, backfilled: bool
) -> None:
@@ -777,24 +766,10 @@ class FederationEventHandler:
"""
logger.info("Processing pulled event %s", event)
# This function should not be used to persist outliers (use something
# else) because this does a bunch of operations that aren't necessary
# (extra work; in particular, it makes sure we have all the prev_events
# and resolves the state across those prev events). If you happen to run
# into a situation where the event you're trying to process/backfill is
# marked as an `outlier`, then you should update that spot to return an
# `EventBase` copy that doesn't have `outlier` flag set.
#
# `EventBase` is used to represent both an event we have not yet
# persisted, and one that we have persisted and now keep in the cache.
# In an ideal world this method would only be called with the first type
# of event, but it turns out that's not actually the case and for
# example, you could get an event from cache that is marked as an
# `outlier` (fix up that spot though).
assert not event.internal_metadata.is_outlier(), (
"Outlier event passed to _process_pulled_event. "
"To persist an event as a non-outlier, make sure to pass in a copy without `event.internal_metadata.outlier = true`."
)
# these should not be outliers.
assert (
not event.internal_metadata.is_outlier()
), "pulled event unexpectedly flagged as outlier"
event_id = event.event_id
@@ -804,7 +779,7 @@ class FederationEventHandler:
if existing:
if not existing.internal_metadata.is_outlier():
logger.info(
"_process_pulled_event: Ignoring received event %s which we have already seen",
"Ignoring received event %s which we have already seen",
event_id,
)
return
@@ -817,55 +792,29 @@ class FederationEventHandler:
return
try:
try:
context = await self._compute_event_context_with_maybe_missing_prevs(
origin, event
)
await self._process_received_pdu(
origin,
event,
context,
backfilled=backfilled,
)
except PartialStateConflictError:
# The room was un-partial stated while we were processing the event.
# Try once more, with full state this time.
context = await self._compute_event_context_with_maybe_missing_prevs(
origin, event
)
state_ids = await self._resolve_state_at_missing_prevs(origin, event)
# TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does
# not return partial state
# https://github.com/matrix-org/synapse/issues/13002
# We ought to have full state now, barring some unlikely race where we left and
            # rejoined the room in the background.
if context.partial_state:
raise AssertionError(
f"Event {event.event_id} still has a partial resolved state "
f"after room {event.room_id} was un-partial stated"
)
await self._process_received_pdu(
origin,
event,
context,
backfilled=backfilled,
)
await self._process_received_pdu(
origin, event, state_ids=state_ids, backfilled=backfilled
)
except FederationError as e:
if e.code == 403:
logger.warning("Pulled event %s failed history check.", event_id)
else:
raise
async def _compute_event_context_with_maybe_missing_prevs(
async def _resolve_state_at_missing_prevs(
self, dest: str, event: EventBase
) -> EventContext:
"""Build an EventContext structure for a non-outlier event whose prev_events may
be missing.
) -> Optional[StateMap[str]]:
"""Calculate the state at an event with missing prev_events.
This is used when we have pulled a batch of events from a remote server, and may
not have all the prev_events.
This is used when we have pulled a batch of events from a remote server, and
still don't have all the prev_events.
To build an EventContext, we need to calculate the state before the event. If we
already have all the prev_events for `event`, we can simply use the state after
the prev_events to calculate the state before `event`.
If we already have all the prev_events for `event`, this method does nothing.
Otherwise, the missing prevs become new backwards extremities, and we fall back
to asking the remote server for the state after each missing `prev_event`,
@@ -886,7 +835,8 @@ class FederationEventHandler:
event: an event to check for missing prevs.
Returns:
The event context.
if we already had all the prev events, `None`. Otherwise, returns
the event ids of the state at `event`.
Raises:
FederationError if we fail to get the state from the remote server after any
@@ -900,7 +850,7 @@ class FederationEventHandler:
missing_prevs = prevs - seen
if not missing_prevs:
return await self._state_handler.compute_event_context(event)
return None
logger.info(
"Event %s is missing prev_events %s: calculating state for a "
@@ -912,15 +862,9 @@ class FederationEventHandler:
# resolve them to find the correct state at the current event.
try:
# Determine whether we may be about to retrieve partial state
# Events may be un-partial stated right after we compute the partial state
# flag, but that's okay, as long as the flag errs on the conservative side.
partial_state_flags = await self._store.get_partial_state_events(seen)
partial_state = any(partial_state_flags.values())
# Get the state of the events we know about
ours = await self._state_storage_controller.get_state_groups_ids(
room_id, seen, await_full_state=False
room_id, seen
)
# state_maps is a list of mappings from (type, state_key) to event_id
@@ -966,9 +910,7 @@ class FederationEventHandler:
"We can't get valid state history.",
affected=event_id,
)
return await self._state_handler.compute_event_context(
event, state_ids_before_event=state_map, partial_state=partial_state
)
return state_map
async def _get_state_ids_after_missing_prev_event(
self,
@@ -1095,9 +1037,6 @@ class FederationEventHandler:
# XXX: this doesn't sound right? it means that we'll end up with incomplete
# state.
failed_to_fetch = desired_events - event_metadata.keys()
# `event_id` could be missing from `event_metadata` because it's not necessarily
# a state event. We've already checked that we've fetched it above.
failed_to_fetch.discard(event_id)
if failed_to_fetch:
logger.warning(
"Failed to fetch missing state events for %s %s",
@@ -1137,7 +1076,7 @@ class FederationEventHandler:
self,
origin: str,
event: EventBase,
context: EventContext,
state_ids: Optional[StateMap[str]],
backfilled: bool = False,
) -> None:
"""Called when we have a new non-outlier event.
@@ -1159,18 +1098,24 @@ class FederationEventHandler:
event: event to be persisted
context: The `EventContext` to persist the event with.
state_ids: Normally None, but if we are handling a gap in the graph
(ie, we are missing one or more prev_events), the resolved state at the
event. Must not be partial state.
backfilled: True if this is part of a historical batch of events (inhibits
notification to clients, and validation of device keys.)
PartialStateConflictError: if the room was un-partial stated in between
computing the state at the event and persisting it. The caller should
recompute `context` and retry exactly once when this happens.
computing the state at the event and persisting it. The caller should retry
exactly once in this case. Will never be raised if `state_ids` is provided.
"""
logger.debug("Processing event: %s", event)
assert not event.internal_metadata.outlier
context = await self._state_handler.compute_event_context(
event,
state_ids_before_event=state_ids,
)
try:
await self._check_event_auth(origin, event, context)
except AuthError as e:
@@ -1182,7 +1127,7 @@ class FederationEventHandler:
# For new (non-backfilled and non-outlier) events we check if the event
# passes auth based on the current state. If it doesn't then we
# "soft-fail" the event.
await self._check_for_soft_fail(event, context=context, origin=origin)
await self._check_for_soft_fail(event, state_ids, origin=origin)
await self._run_push_actions_and_persist_event(event, context, backfilled)
@@ -1367,53 +1312,6 @@ class FederationEventHandler:
marker_event,
)
async def backfill_event_id(
self, destination: str, room_id: str, event_id: str
) -> EventBase:
"""Backfill a single event and persist it as a non-outlier which means
we also pull in all of the state and auth events necessary for it.
Args:
destination: The homeserver to pull the given event_id from.
room_id: The room where the event is from.
event_id: The event ID to backfill.
Raises:
FederationError if we are unable to find the event from the destination
"""
logger.info(
"backfill_event_id: event_id=%s from destination=%s", event_id, destination
)
room_version = await self._store.get_room_version(room_id)
event_from_response = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
)
if not event_from_response:
raise FederationError(
"ERROR",
404,
"Unable to find event_id=%s from destination=%s to backfill."
% (event_id, destination),
affected=event_id,
)
# Persist the event we just fetched, including pulling all of the state
# and auth events to de-outlier it. This also sets up the necessary
# `state_groups` for the event.
await self._process_pulled_events(
destination,
[event_from_response],
# Prevent notifications going to clients
backfilled=True,
)
return event_from_response
async def _get_events_and_persist(
self, destination: str, room_id: str, event_ids: Collection[str]
) -> None:
@@ -1594,15 +1492,13 @@ class FederationEventHandler:
)
async def _check_event_auth(
self, origin: Optional[str], event: EventBase, context: EventContext
self, origin: str, event: EventBase, context: EventContext
) -> None:
"""
Checks whether an event should be rejected (for failing auth checks).
Args:
origin: The host the event originates from. This is used to fetch
any missing auth events. It can be set to None, but only if we are
sure that we already have all the auth events.
origin: The host the event originates from.
event: The event itself.
context:
The event context.
@@ -1745,27 +1641,17 @@ class FederationEventHandler:
async def _check_for_soft_fail(
self,
event: EventBase,
context: EventContext,
state_ids: Optional[StateMap[str]],
origin: str,
) -> None:
"""Checks if we should soft fail the event; if so, marks the event as
such.
Does nothing for events in rooms with partial state, since we may not have an
accurate membership event for the sender in the current state.
Args:
event
context: The `EventContext` which we are about to persist the event with.
state_ids: The state at the event if we don't have all the event's prev events
origin: The host the event originates from.
"""
if await self._store.is_partial_state_room(event.room_id):
# We might not know the sender's membership in the current state, so don't
# soft fail anything. Even if we do have a membership for the sender in the
# current state, it may have been derived from state resolution between
# partial and full state and may not be accurate.
return
extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
extrem_ids = set(extrem_ids_list)
prev_event_ids = set(event.prev_event_ids())
@@ -1782,15 +1668,11 @@ class FederationEventHandler:
auth_types = auth_types_for_event(room_version_obj, event)
# Calculate the "current state".
seen_event_ids = await self._store.have_events_in_timeline(prev_event_ids)
has_missing_prevs = bool(prev_event_ids - seen_event_ids)
if has_missing_prevs:
# We don't have all the prev_events of this event, which means we have a
# gap in the graph, and the new event is going to become a new backwards
# extremity.
#
# In this case we want to be a little careful as we might have been
# down for a while and have an incorrect view of the current state,
if state_ids is not None:
# If we're explicitly given the state then we won't have all the
# prev events, and so we have a gap in the graph. In this case
# we want to be a little careful as we might have been down for
# a while and have an incorrect view of the current state,
# however we still want to do checks as gaps are easy to
# maliciously manufacture.
#
@@ -1803,7 +1685,6 @@ class FederationEventHandler:
event.room_id, extrem_ids
)
state_sets: List[StateMap[str]] = list(state_sets_d.values())
state_ids = await context.get_prev_state_ids()
state_sets.append(state_ids)
current_state_ids = (
await self._state_resolution_handler.resolve_events_with_store(
@@ -1853,7 +1734,7 @@ class FederationEventHandler:
event.internal_metadata.soft_failed = True
async def _load_or_fetch_auth_events_for_event(
self, destination: Optional[str], event: EventBase
self, destination: str, event: EventBase
) -> Collection[EventBase]:
"""Fetch this event's auth_events, from database or remote
@@ -1869,19 +1750,12 @@ class FederationEventHandler:
Args:
destination: where to send the /event_auth request. Typically the server
that sent us `event` in the first place.
If this is None, no attempt is made to load any missing auth events:
rather, an AssertionError is raised if there are any missing events.
event: the event whose auth_events we want
Returns:
            all of the events listed in `event.auth_event_ids`, after deduplication
Raises:
AssertionError if some auth events were missing and no `destination` was
supplied.
AuthError if we were unable to fetch the auth_events for any reason.
"""
event_auth_event_ids = set(event.auth_event_ids())
@@ -1893,13 +1767,6 @@ class FederationEventHandler:
)
if not missing_auth_event_ids:
return event_auth_events.values()
if destination is None:
# this shouldn't happen: destination must be set unless we know we have already
# persisted the auth events.
raise AssertionError(
"_load_or_fetch_auth_events_for_event() called with no destination for "
"an event with missing auth_events"
)
logger.info(
"Event %s refers to unknown auth events %s: fetching auth chain",
@@ -2113,10 +1980,6 @@ class FederationEventHandler:
event, event_pos, max_stream_token, extra_users=extra_users
)
if event.type == EventTypes.Member and event.membership == Membership.JOIN:
# TODO retrieve the previous state, and exclude join -> join transitions
self._notifier.notify_user_joined_room(event.event_id, event.room_id)
def _sanity_check_event(self, ev: EventBase) -> None:
"""
Do some early sanity checks of a received event

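Several hunks above share a retry-once contract: if the room is un-partial-stated between computing the event context and persisting it, PartialStateConflictError is raised, and the caller recomputes the context and tries exactly one more time. A minimal sketch of the calling pattern, with the error class stubbed:

class PartialStateConflictErrorSketch(Exception):
    """Stub for the conflict raised when a room is un-partial-stated mid-persist."""

async def process_with_retry(compute_context, process, event) -> None:
    try:
        context = await compute_context(event)
        await process(event, context)
    except PartialStateConflictErrorSketch:
        # The state group must be rebuilt against the now-full state,
        # then we try exactly once more.
        context = await compute_context(event)
        await process(event, context)
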
View File

@@ -26,7 +26,6 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.http import RequestTimedOutError
from synapse.http.client import SimpleHttpClient
from synapse.http.site import SynapseRequest
@@ -416,48 +415,6 @@ class IdentityHandler:
return session_id
async def request_email_token(
self,
id_server: str,
email: str,
client_secret: str,
send_attempt: int,
next_link: Optional[str] = None,
) -> JsonDict:
"""
Request an external server send an email on our behalf for the purposes of threepid
validation.
Args:
id_server: The identity server to proxy to
email: The email to send the message to
client_secret: The unique client_secret sends by the user
send_attempt: Which attempt this is
next_link: A link to redirect the user to once they submit the token
Returns:
The json response body from the server
"""
params = {
"email": email,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
if next_link:
params["next_link"] = next_link
try:
data = await self.http_client.post_json_get_json(
id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
params,
)
return data
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
async def requestMsisdnToken(
self,
id_server: str,
@@ -531,18 +488,7 @@ class IdentityHandler:
validation_session = None
# Try to validate as email
if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
# Remote emails will only be used if a valid identity server is provided.
assert (
self.hs.config.registration.account_threepid_delegate_email is not None
)
# Ask our delegated email identity server
validation_session = await self.threepid_from_creds(
self.hs.config.registration.account_threepid_delegate_email,
threepid_creds,
)
elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
if self.hs.config.email.can_verify_email:
# Get a validated session matching these details
validation_session = await self.store.get_threepid_validation_session(
"email", client_secret, sid=sid, validated=True

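With the remote-delegate path removed above, email validation is purely local: the handler looks up an already-validated session in its own store instead of proxying requestToken to an identity server. A hedged sketch of that lookup, mirroring the store call shown in the hunk:

from typing import Optional

async def validate_email_threepid(
    store, client_secret: str, sid: str, can_verify_email: bool
) -> Optional[dict]:
    # Only possible if this homeserver can send email itself.
    if not can_verify_email:
        return None
    return await store.get_threepid_validation_session(
        "email", client_secret, sid=sid, validated=True
    )
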
Some files were not shown because too many files have changed in this diff.