Compare commits
5 commits: v1.60.0rc1 ... shay/mx_ma
| Author | SHA1 | Date |
|---|---|---|
|  | 85b30abfde |  |
|  | d809a4c8fb |  |
|  | df621cbaa5 |  |
|  | 301b9cdfcc |  |
|  | 21002db229 |  |
```diff
@@ -6,6 +6,3 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
 # Update black to 20.8b1 (#9381).
 0a00b7ff14890987f09112a2ae696c61001e6cf1
 # Convert tests/rest/admin/test_room.py to unix file endings (#7953).
 c4268e3da64f1abb5b31deaeb5769adb6510c0a7
```
30 .github/workflows/docker.yml vendored
```diff
@@ -34,24 +34,32 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       # TODO: consider using https://github.com/docker/metadata-action instead of this
       # custom magic
       - name: Calculate docker image tag
         id: set-tag
         uses: docker/metadata-action@master
         with:
           images: matrixdotorg/synapse
           flavor: |
             latest=false
           tags: |
             type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
             type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
             type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
             type=pep440,pattern={{raw}}
         run: |
           case "${GITHUB_REF}" in
               refs/heads/develop)
                   tag=develop
                   ;;
               refs/heads/master|refs/heads/main)
                   tag=latest
                   ;;
               refs/tags/*)
                   tag=${GITHUB_REF#refs/tags/}
                   ;;
               *)
                   tag=${GITHUB_SHA}
                   ;;
           esac
           echo "::set-output name=tag::$tag"
 
       - name: Build and push all platforms
         uses: docker/build-push-action@v2
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
-          tags: "${{ steps.set-tag.outputs.tags }}"
+          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
```
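
Both variants of this step implement the same ref-to-tag mapping. As a standalone illustration (not part of the workflow itself), the shell script's logic amounts to:

```python
# Standalone sketch of the ref-to-tag mapping the shell script above
# implements; the metadata-action configuration expresses the same rules.
def docker_tag(github_ref: str, github_sha: str) -> str:
    if github_ref == "refs/heads/develop":
        return "develop"
    if github_ref in ("refs/heads/master", "refs/heads/main"):
        return "latest"
    if github_ref.startswith("refs/tags/"):
        return github_ref[len("refs/tags/"):]  # e.g. "v1.60.0rc1"
    return github_sha  # fall back to the commit SHA


assert docker_tag("refs/heads/develop", "deadbeef") == "develop"
assert docker_tag("refs/tags/v1.60.0rc1", "deadbeef") == "v1.60.0rc1"
```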

3 .github/workflows/latest_deps.yml vendored
```diff
@@ -32,15 +32,12 @@ jobs:
         with:
           python-version: "3.x"
           poetry-version: "1.2.0b1"
           extras: "all"
       # Dump installed versions for debugging.
       - run: poetry run pip list > before.txt
       # Upgrade all runtime dependencies only. This is intended to mimic a fresh
       # `pip install matrix-synapse[all]` as closely as possible.
       - run: poetry update --no-dev
       - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
       - name: Remove warn_unused_ignores from mypy config
         run: sed '/warn_unused_ignores = True/d' -i mypy.ini
       - run: poetry run mypy
 
   trial:
     runs-on: ubuntu-latest
```

2 .github/workflows/twisted_trunk.yml vendored
```diff
@@ -24,8 +24,6 @@ jobs:
           poetry remove twisted
           poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
           poetry install --no-interaction --extras "all test"
       - name: Remove warn_unused_ignores from mypy config
         run: sed '/warn_unused_ignores = True/d' -i mypy.ini
       - run: poetry run mypy
 
   trial:
```

269 CHANGES.md
@@ -1,270 +1,3 @@

Synapse 1.60.0rc1 (2022-05-24)
==============================

This release of Synapse adds a unique index to the `state_group_edges` table, in
order to prevent accidentally introducing duplicate information (for example,
because a database backup was restored multiple times). If your Synapse database
already has duplicate rows in this table, this could fail with an error and
require manual remediation.
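
Administrators who suspect duplicates can check for them before upgrading. A minimal sketch, assuming a PostgreSQL database reachable via psycopg2 and the two columns that make up an edge (`state_group`, `prev_state_group`); the upgrade notes describe the supported remediation:

```python
# Sketch: count duplicate rows in state_group_edges before upgrading.
# Assumes PostgreSQL via psycopg2; connection string is a placeholder.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user")
with conn, conn.cursor() as cur:
    cur.execute(
        """
        SELECT state_group, prev_state_group, count(*) AS copies
        FROM state_group_edges
        GROUP BY state_group, prev_state_group
        HAVING count(*) > 1
        """
    )
    for state_group, prev_state_group, copies in cur:
        print(f"duplicate edge {state_group} -> {prev_state_group}: {copies} copies")
```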

Additionally, the signature of the `check_event_for_spam` module callback has changed.
The previous signature has been deprecated and remains working for now. Module authors
should update their modules to use the new signature where possible.

See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
for more details.
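
A rough sketch of the change, based on the description above (the `NOT_SPAM` constant and `Codes` enum are provided by the module API per the v1.60 upgrade notes; consult those notes for the authoritative types):

```python
# Sketch of the deprecated vs. new check_event_for_spam signatures.
# The legacy method is renamed here purely to show both side by side;
# the event-body check is illustrative only.
import synapse.module_api
from synapse.module_api.errors import Codes


class ExampleSpamChecker:
    # Deprecated signature: a bool (or error string), where False
    # ambiguously meant "this event is fine".
    async def check_event_for_spam_legacy(self, event) -> bool:
        return "spam" in event.content.get("body", "")

    # New signature: an explicit allow value, or an error code to block.
    async def check_event_for_spam(self, event):
        if "spam" in event.content.get("body", ""):
            return Codes.FORBIDDEN
        return synapse.module_api.NOT_SPAM
```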

Features
--------

- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal (see the sketch after this list). ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
- Add config options to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
- Update the [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state, to avoid markers being lost in timeline gaps for federated servers, which would leave the imported history undiscoverable. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
- Update the `check_event_for_spam` module callback: deprecate the current callback signature and replace it with one that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (able to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
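
The SIGHUP-triggered cache reload mentioned above can be exercised like this (a hypothetical helper; the pid-file path is deployment-specific):

```python
# Hypothetical helper: deliver SIGHUP to a running Synapse so it
# re-reads its cache configuration. Adjust the pid-file path to match
# your deployment.
import os
import signal


def reload_synapse_caches(pid_file: str = "/var/run/matrix-synapse.pid") -> None:
    with open(pid_file) as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGHUP)
```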

Bugfixes
--------

- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if a retention policy was set in a room while support for retention policies was disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced, and the consequent impact on performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
- Fix a bug introduced in Synapse 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
- Fix a bug introduced in Synapse 1.21.0 that left media thumbnails unusable before the index had been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))

Updates to the Docker image
---------------------------

- Fix the Dockerfile after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))


Improved Documentation
----------------------

- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
- Fix a typo in the server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
- Fix a typo in the `run_background_tasks_on` option name in the configuration manual. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
- Recommend using Docker to run tests against PostgreSQL. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
- Add a missing user directory endpoint to the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
- Add additional info to the documentation of the `cache_autotuning` config option. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
- Update the configuration manual to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))

Deprecations and Removals
-------------------------

- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))
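
To illustrate the new requirement (a hypothetical client call; the URL, identifiers and token are placeholders), an empty JSON object is now enough to satisfy the body check:

```python
# After this change, the receipt endpoint requires a JSON body; an
# empty object suffices. All identifiers below are placeholders.
import requests

resp = requests.post(
    "https://synapse.example.com/_matrix/client/v3/rooms/"
    "!room:example.com/receipt/m.read/$event_id",
    headers={"Authorization": "Bearer <access_token>"},
    json={},  # the now-required request body
)
resp.raise_for_status()
```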

Internal Changes
----------------

- Improve the event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
- Add a `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
- Refactor functions onto `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
- Remove code which updates the unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
- Refactor the `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
- Convert the namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
- Make handling of the federation `Authorization` header (more) compliant with RFC 7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
- Update `EventContext.get_current_event_ids` and `get_prev_event_ids` to accept state filters, and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))

Synapse 1.59.1 (2022-05-18)
===========================

This release fixes a long-standing issue which could prevent Synapse's user directory from updating properly.

Bugfixes
--------

- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))

Synapse 1.59.0 (2022-05-17)
===========================

Synapse 1.59 makes several changes that server administrators should be aware of:

- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654))

See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details.

Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
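
For example, a client that previously logged in with `m.login.jwt` would now send (a hypothetical call; the signed JWT comes from your external issuer):

```python
# Hypothetical login call using org.matrix.login.jwt in place of the
# removed m.login.jwt type; requires jwt_config.enabled: true on the
# homeserver. URL and token are placeholders.
import requests

resp = requests.post(
    "https://synapse.example.com/_matrix/client/v3/login",
    json={"type": "org.matrix.login.jwt", "token": "<signed-jwt>"},
)
resp.raise_for_status()
print(resp.json()["user_id"])
```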

Bugfixes
--------

- Fix DB performance regression introduced in Synapse 1.59.0rc2. ([\#12745](https://github.com/matrix-org/synapse/issues/12745))


Synapse 1.59.0rc2 (2022-05-16)
==============================

Note: this release candidate includes a performance regression which can cause database disruption. Other release candidates in the v1.59.0 series are not affected, and a fix will be included in the v1.59.0 final release.

Bugfixes
--------

- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was rejected. ([\#12729](https://github.com/matrix-org/synapse/issues/12729))


Synapse 1.59.0rc1 (2022-05-10)
==============================

Features
--------

- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507))
- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670))
- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452))
- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654))
- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526))
- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601))
- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619))

Bugfixes
--------

- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273))
- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544))
- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570))
- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580))
- Fix a race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594))
- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633))
- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657))
- Prevent a memory leak from recurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656))


Updates to the Docker image
---------------------------

- Explicitly opt in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541))
- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573))


Improved Documentation
----------------------

- Update SQL statements and replace the use of the old table `user_stats_historical` in docs for Synapse admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536))
- Add a missing linebreak to the `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579))
- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621))
- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627))
- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664))


Deprecations and Removals
-------------------------

- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` login type from [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))

Internal Changes
----------------

- Use supervisord to supervise Postgres and Caddy in the Complement image, to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480))
- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500))
- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505))
- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564))
- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568))
- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577))
- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581))
- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582))
- Log the status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663))
- Remove the special case for the `twisted` logger from the default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589))
- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599))
- Add a link to documentation in the Grafana dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602))
- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610))
- Add extra debug logging to the federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614))
- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620))
- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624))
- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632))
- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637))
- Move `pympler` back into the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652))
- Fix the spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665))
- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556))
- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612))

### Typechecking

- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356))
- Add some type hints to the datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485))
- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531))
- Allow unused `# type: ignore` comments in bleeding-edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576))
- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608))
- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650))
- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666))
- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667))
- Fix mypy against the latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671))

Synapse 1.58.1 (2022-05-05)
===========================

This patch release includes a fix to the Debian packages, installing the
`systemd` and `cache_memory` extra package groups, which were incorrectly
omitted in v1.58.0. This primarily prevented Synapse from starting
when the `systemd.journal.JournalHandler` log handler was configured.
See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information.

Otherwise, no significant changes since 1.58.0.


Synapse 1.58.0 (2022-05-03)
===========================

As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.

No significant changes since 1.58.0rc2.


Synapse 1.58.0rc2 (2022-04-26)
==============================

@@ -286,6 +19,8 @@ Internal Changes

Synapse 1.58.0rc1 (2022-04-26)
==============================

As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.

Features
--------

10 README.rst
```diff
@@ -55,7 +55,7 @@ solutions. The hope is for Matrix to act as the building blocks for a new
 generation of fully open and interoperable messaging and VoIP apps for the
 internet.
 
-Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
+Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
 team, written in Python 3/Twisted.
 
 In Matrix, every user runs one or more Matrix clients, which connect through to
@@ -294,13 +294,13 @@ directory of your choice::
     cd synapse
 
 Synapse has a number of external dependencies. We maintain a fixed development
-environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::
+environment using [poetry](https://python-poetry.org/). First, install poetry. We recommend
 
     pip install --user pipx
     pipx install poetry
 
 as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
-(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
+(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`
 for other installation methods.) Then ask poetry to create a virtual environment
 from the project and install Synapse's dependencies::
@@ -309,11 +309,11 @@ from the project and install Synapse's dependencies::
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
 
-We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::
+We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
 
     poetry run ./demo/start.sh
 
-(to stop, you can use ``poetry run ./demo/stop.sh``)
+(to stop, you can use `poetry run ./demo/stop.sh`)
 
 See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
 for more information.
```

1 changelog.d/12356.misc Normal file
@@ -0,0 +1 @@
Fix scripts-dev to pass typechecking.

1 changelog.d/12406.feature Normal file
@@ -0,0 +1 @@
Add a module API to allow modules to change actions for existing push rules of local users.

1 changelog.d/12480.misc Normal file
@@ -0,0 +1 @@
Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time.

1 changelog.d/12485.misc Normal file
@@ -0,0 +1 @@
Add some type hints to datastore.

1 changelog.d/12505.misc Normal file
@@ -0,0 +1 @@
Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests.

1 changelog.d/12526.feature Normal file
@@ -0,0 +1 @@
Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid.

1 changelog.d/12531.misc Normal file
@@ -0,0 +1 @@
Remove unused `# type: ignore`s.

1 changelog.d/12541.docker Normal file
@@ -0,0 +1 @@
Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments.

1 changelog.d/12544.bugfix Normal file
@@ -0,0 +1 @@
Fix a bug where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper.

1 changelog.d/12564.misc Normal file
@@ -0,0 +1 @@
Consistently check if an object is a `frozendict`.

1 changelog.d/12572.feature Normal file
@@ -0,0 +1 @@
Add the Synapse function `types.map_username_to_mxid_localpart` to the Module API.
```diff
@@ -66,18 +66,6 @@
       ],
       "title": "Dashboards",
       "type": "dashboards"
     },
-    {
-      "asDropdown": false,
-      "icon": "external link",
-      "includeVars": false,
-      "keepTime": false,
-      "tags": [],
-      "targetBlank": true,
-      "title": "Synapse Documentation",
-      "tooltip": "Open Documentation",
-      "type": "link",
-      "url": "https://matrix-org.github.io/synapse/latest/"
-    }
   ],
   "panels": [
@@ -10901,4 +10889,4 @@
   "title": "Synapse",
   "uid": "000000012",
   "version": 100
 }
 }
```

6 debian/build_virtualenv vendored
```diff
@@ -37,11 +37,7 @@ python3 -m venv "$TEMP_VENV"
 source "$TEMP_VENV/bin/activate"
 pip install -U pip
 pip install poetry==1.2.0b1
-poetry export \
-    --extras all \
-    --extras test \
-    --extras systemd \
-    -o exported_requirements.txt
+poetry export --extras all --extras test -o exported_requirements.txt
 deactivate
 rm -rf "$TEMP_VENV"
```

47 debian/changelog vendored
```diff
@@ -1,50 +1,3 @@
 matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.60.0rc1.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 24 May 2022 12:05:01 +0100
 
 matrix-synapse-py3 (1.59.1) stable; urgency=medium
 
   * New Synapse release 1.59.1.
 
  -- Synapse Packaging team <packages@matrix.org>  Wed, 18 May 2022 11:41:46 +0100
 
 matrix-synapse-py3 (1.59.0) stable; urgency=medium
 
   * New Synapse release 1.59.0.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 17 May 2022 10:26:50 +0100
 
 matrix-synapse-py3 (1.59.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.59.0rc2.
 
  -- Synapse Packaging team <packages@matrix.org>  Mon, 16 May 2022 12:52:15 +0100
 
 matrix-synapse-py3 (1.59.0~rc1) stable; urgency=medium
 
   * Adjust how the `exported-requirements.txt` file is generated as part of
     the process of building these packages. This affects the package
     maintainers only; end-users are unaffected.
   * New Synapse release 1.59.0rc1.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 10 May 2022 10:45:08 +0100
 
 matrix-synapse-py3 (1.58.1) stable; urgency=medium
 
   * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
     were incorrectly omitted from the 1.58.0 package.
   * New Synapse release 1.58.1.
 
  -- Synapse Packaging team <packages@matrix.org>  Thu, 05 May 2022 14:58:23 +0100
 
 matrix-synapse-py3 (1.58.0) stable; urgency=medium
 
   * New Synapse release 1.58.0.
 
  -- Synapse Packaging team <packages@matrix.org>  Tue, 03 May 2022 10:52:58 +0100
 
 matrix-synapse-py3 (1.58.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.58.0rc2.
```
```diff
@@ -12,7 +12,6 @@ export PYTHONPATH
 
 echo "$PYTHONPATH"
 
 # Create servers which listen on HTTP at 808x and HTTPS at 848x.
 for port in 8080 8081 8082; do
     echo "Starting server on port $port... "
@@ -20,12 +19,10 @@ for port in 8080 8081 8082; do
     mkdir -p demo/$port
     pushd demo/$port || exit
 
     # Generate the configuration for the homeserver at localhost:848x, note that
     # the homeserver name needs to match the HTTPS listening port for federation
     # to properly work..
     # Generate the configuration for the homeserver at localhost:848x.
     python3 -m synapse.app.homeserver \
         --generate-config \
-        --server-name "localhost:$https_port" \
+        --server-name "localhost:$port" \
         --config-path "$port.config" \
         --report-stats no
```
```diff
@@ -55,7 +55,7 @@ RUN \
 # NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
 # pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
 RUN --mount=type=cache,target=/root/.cache/pip \
-  pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
+  pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
 
 WORKDIR /synapse
```
```diff
@@ -6,6 +6,12 @@
 # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 FROM matrixdotorg/synapse-workers
 
 # Download a caddy server to stand in front of nginx and terminate TLS using Complement's
 # custom CA.
 # We include this near the top of the file in order to cache the result.
 RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
     tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
 
 # Install postgresql
 RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
@@ -25,12 +31,16 @@ COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml
 
 WORKDIR /data
 
 # Copy the caddy config
 COPY conf-workers/caddy.complement.json /root/caddy.json
 
 COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
 COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf
 
 # Copy the entrypoint
 COPY conf-workers/start-complement-synapse-workers.sh /
 
-# Expose nginx's listener ports
+# Expose caddy's listener ports
 EXPOSE 8008 8448
 
 ENTRYPOINT ["/start-complement-synapse-workers.sh"]
```

72 docker/complement/conf-workers/caddy.complement.json Normal file
@@ -0,0 +1,72 @@

```json
{
    "apps": {
        "http": {
            "servers": {
                "srv0": {
                    "listen": [":8448"],
                    "routes": [
                        {
                            "match": [{"host": ["{{ server_name }}"]}],
                            "handle": [
                                {
                                    "handler": "subroute",
                                    "routes": [
                                        {
                                            "handle": [
                                                {
                                                    "handler": "reverse_proxy",
                                                    "upstreams": [{"dial": "localhost:8008"}]
                                                }
                                            ]
                                        }
                                    ]
                                }
                            ],
                            "terminal": true
                        }
                    ]
                }
            }
        },
        "tls": {
            "automation": {
                "policies": [
                    {
                        "subjects": ["{{ server_name }}"],
                        "issuers": [{"module": "internal"}],
                        "on_demand": true
                    }
                ]
            }
        },
        "pki": {
            "certificate_authorities": {
                "local": {
                    "name": "Complement CA",
                    "root": {
                        "certificate": "/complement/ca/ca.crt",
                        "private_key": "/complement/ca/ca.key"
                    }
                }
            }
        }
    }
}
```

7 docker/complement/conf-workers/caddy.supervisord.conf Normal file
@@ -0,0 +1,7 @@

```ini
[program:caddy]
command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json
autorestart=unexpected
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
```
```diff
@@ -9,6 +9,9 @@ function log {
     echo "$d $@"
 }
 
 # Replace the server name in the caddy config
 sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
 
 # Set the server name of the homeserver
 export SYNAPSE_SERVER_NAME=${SERVER_NAME}
 
@@ -36,26 +39,6 @@ export SYNAPSE_WORKER_TYPES="\
     appservice, \
     pusher"
 
 # Add Complement's appservice registration directory, if there is one
 # (It can be absent when there are no application services in this test!)
 if [ -d /complement/appservice ]; then
     export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
 fi
 
 # Generate a TLS key, then generate a certificate by having Complement's CA sign it
 # Note that both the key and certificate are in PEM format (not DER).
 openssl genrsa -out /conf/server.tls.key 2048
 
 openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
     -subj "/CN=${SERVER_NAME}"
 
 openssl x509 -req -in /conf/server.tls.csr \
     -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
     -out /conf/server.tls.crt
 
 export SYNAPSE_TLS_CERT=/conf/server.tls.crt
 export SYNAPSE_TLS_KEY=/conf/server.tls.key
 
 # Run the script that writes the necessary config files and starts supervisord, which in turn
 # starts everything else
 exec /configure_workers_and_start.py
```
```diff
@@ -5,12 +5,6 @@ enable_registration: true
 enable_registration_without_verification: true
 bcrypt_rounds: 4
 
 ## Registration ##
 
 # Needed by Complement to register admin users
 # DO NOT USE in a production configuration! This should be a random secret.
 registration_shared_secret: complement
 
 ## Federation ##
 
 # trust certs signed by Complement's CA
@@ -59,18 +53,6 @@ rc_joins:
     per_second: 9999
     burst_count: 9999
 
 rc_3pid_validation:
   per_second: 1000
   burst_count: 1000
 
 rc_invites:
   per_room:
     per_second: 1000
     burst_count: 1000
   per_user:
     per_second: 1000
     burst_count: 1000
 
 federation_rr_transactions_per_room_per_second: 9999
 
 ## Experimental Features ##
```
```diff
@@ -87,18 +87,6 @@ rc_joins:
     per_second: 9999
     burst_count: 9999
 
 rc_3pid_validation:
   per_second: 1000
   burst_count: 1000
 
 rc_invites:
   per_room:
     per_second: 1000
     burst_count: 1000
   per_user:
     per_second: 1000
     burst_count: 1000
 
 federation_rr_transactions_per_room_per_second: 9999
 
 ## API Configuration ##
```
```diff
@@ -9,22 +9,6 @@ server {
     listen 8008;
     listen [::]:8008;
 
     {% if tls_cert_path is not none and tls_key_path is not none %}
     listen 8448 ssl;
     listen [::]:8448 ssl;
 
     ssl_certificate {{ tls_cert_path }};
     ssl_certificate_key {{ tls_key_path }};
 
     # Some directives from cipherlist.eu (fka cipherli.st):
     ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
     ssl_prefer_server_ciphers on;
     ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
     ssl_ecdh_curve secp384r1; # Requires nginx >= 1.1.0
     ssl_session_cache shared:SSL:10m;
     ssl_session_tickets off; # Requires nginx >= 1.5.9
     {% endif %}
 
     server_name localhost;
 
     # Nginx by default only allows file uploads up to 1M in size
```
```diff
@@ -6,13 +6,4 @@
 redis:
     enabled: true
 
 {% if appservice_registrations is not none %}
 ## Application Services ##
 # A list of application service config files to use.
 app_service_config_files:
 {%- for path in appservice_registrations %}
   - "{{ path }}"
 {%- endfor %}
 {%- endif %}
 
 {{ shared_worker_config }}
```
```diff
@@ -9,7 +9,7 @@ user=root
 files = /etc/supervisor/conf.d/*.conf
 
 [program:nginx]
-command=/usr/local/bin/prefix-log /usr/sbin/nginx -g "daemon off;"
+command=/usr/sbin/nginx -g "daemon off;"
 priority=500
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -19,7 +19,7 @@ username=www-data
 autorestart=true
 
 [program:redis]
-command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
+command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -29,7 +29,7 @@ username=redis
 autorestart=true
 
 [program:synapse_main]
-command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
+command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
 priority=10
 # Log startup failures to supervisord's stdout/err
 # Regular synapse logs will still go in the configured data directory
```
```diff
@@ -21,11 +21,6 @@
 # * SYNAPSE_REPORT_STATS: Whether to report stats.
 # * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
 #   below. Leave empty for no workers, or set to '*' for all possible workers.
 # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
 #   will be treated as Application Service registration files.
 # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
 # * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
 #   Nginx will be configured to serve TLS on port 8448.
 #
 # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
 # in the project's README), this script may be run multiple times, and functionality should
@@ -34,7 +29,6 @@
 import os
 import subprocess
 import sys
-from pathlib import Path
 from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
 
 import jinja2
@@ -75,10 +69,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "enable_media_repo: true",
     },
     "appservice": {
-        "app": "synapse.app.generic_worker",
+        "app": "synapse.app.appservice",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
+        "shared_extra_conf": {"notify_appservices": False},
         "worker_extra_conf": "",
     },
     "federation_sender": {
@@ -494,23 +488,11 @@ def generate_worker_files(
     master_log_config = generate_worker_log_config(environ, "master", data_dir)
     shared_config["log_config"] = master_log_config
 
     # Find application service registrations
     appservice_registrations = None
     appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
     if appservice_registration_dir:
         # Scan for all YAML files that should be application service registrations.
         appservice_registrations = [
             str(reg_path.resolve())
             for reg_path in Path(appservice_registration_dir).iterdir()
             if reg_path.suffix.lower() in (".yaml", ".yml")
         ]
 
     # Shared homeserver config
     convert(
         "/conf/shared.yaml.j2",
         "/conf/workers/shared.yaml",
         shared_worker_config=yaml.dump(shared_config),
         appservice_registrations=appservice_registrations,
     )
 
     # Nginx config
@@ -519,8 +501,6 @@ def generate_worker_files(
         "/etc/nginx/conf.d/matrix-synapse.conf",
         worker_locations=nginx_location_config,
         upstream_directives=nginx_upstream_config,
         tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
         tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
     )
 
     # Supervisord config
```
```diff
@@ -89,7 +89,6 @@
 - [Database Schemas](development/database_schema.md)
 - [Experimental features](development/experimental_features.md)
 - [Synapse Architecture]()
-  - [Cancellation](development/synapse_architecture/cancellation.md)
 - [Log Contexts](log_contexts.md)
 - [Replication](replication.md)
 - [TCP Replication](tcp_replication.md)
```
```diff
@@ -289,7 +289,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
 
 URL Parameters
 
-* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
+* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
 All cached media that was last accessed before this timestamp will be removed.
 
 Response:
```
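
A hypothetical client-side call for the endpoint documented above (the homeserver URL and admin token are placeholders):

```python
# Hypothetical admin API call for POST /_synapse/admin/v1/purge_media_cache.
import time

import requests

before_ts = int(time.time() * 1000)  # "now" as a Unix timestamp in milliseconds

resp = requests.post(
    "https://synapse.example.com/_synapse/admin/v1/purge_media_cache",
    params={"before_ts": before_ts},
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
print(resp.json())
```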

@@ -206,32 +206,7 @@ This means that we need to run our unit tests against PostgreSQL too. Our CI does
this automatically for pull requests and release candidates, but it's sometimes
useful to reproduce this locally.

#### Using Docker

The easiest way to do so is to run Postgres via a docker container. In one
terminal:

```shell
docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgress -p 5432:5432 postgres:14
```

If you see an error like

```
docker: Error response from daemon: driver failed programming external connectivity on endpoint nice_ride (b57bbe2e251b70015518d00c9981e8cb8346b5c785250341a6c53e3c899875f1): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use.
```

then something is already bound to port 5432. You're probably already running postgres locally.

Once you have a postgres server running, invoke `trial` in a second terminal:

```shell
SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_HOST=127.0.0.1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_POSTGRES_PASSWORD=mysecretpassword poetry run trial tests
```

#### Using an existing Postgres installation

If you have postgres already installed on your system, you can run `trial` with the
To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:

- `SYNAPSE_POSTGRES` to anything nonempty

@@ -254,8 +229,8 @@ You don't need to specify the host, user, port or password if your Postgres
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).

Your Postgres account needs to be able to create databases; see the postgres
docs for [`ALTER ROLE`](https://www.postgresql.org/docs/current/sql-alterrole.html).
Your Postgres account needs to be able to create databases.


## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).

@@ -295,13 +270,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh

To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:

```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
```

To run a specific test, you can specify the whole name structure:

```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
```

@@ -422,8 +397,8 @@ same lightweight approach that the Linux Kernel
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO ([Developer Certificate of Origin](http://developercertificate.org/)).
This is a simple declaration that you wrote
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix:

```
@@ -5,7 +5,7 @@
|
||||
Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
|
||||
|
||||
The demo setup allows running three federation Synapse servers, with server
|
||||
names `localhost:8480`, `localhost:8481`, and `localhost:8482`.
|
||||
names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
|
||||
|
||||
You can access them via any Matrix client over HTTP at `localhost:8080`,
|
||||
`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
|
||||
@@ -20,10 +20,9 @@ and the servers are configured in a highly insecure way, including:
|
||||
The servers are configured to store their data under `demo/8080`, `demo/8081`, and
|
||||
`demo/8082`. This includes configuration, logs, SQLite databases, and media.
|
||||
|
||||
Note that when joining a public room on a different homeserver via "#foo:bar.net",
|
||||
then you are (in the current implementation) joining a room with room_id "foo".
|
||||
This means that it won't work if your homeserver already has a room with that
|
||||
name.
|
||||
Note that when joining a public room on a different HS via "#foo:bar.net", then
|
||||
you are (in the current impl) joining a room with room_id "foo". This means that
|
||||
it won't work if your HS already has a room with that name.
|
||||
|
||||
## Using the demo scripts
|
||||
|
||||
|
||||
@@ -1,392 +0,0 @@
# Cancellation
Sometimes, requests take a long time to service and clients disconnect
before Synapse produces a response. To avoid wasting resources, Synapse
can cancel request processing for select endpoints marked with the
`@cancellable` decorator.

Synapse makes use of Twisted's `Deferred.cancel()` feature to make
cancellation work. The `@cancellable` decorator does nothing by itself
and merely acts as a flag, signalling to developers and other code alike
that a method can be cancelled.

## Enabling cancellation for an endpoint
1. Check that the endpoint method, and any `async` functions in its call
   tree handle cancellation correctly. See
   [Handling cancellation correctly](#handling-cancellation-correctly)
   for a list of things to look out for.
2. Add the `@cancellable` decorator to the `on_GET/POST/PUT/DELETE`
   method. It's not recommended to make non-`GET` methods cancellable,
   since cancellation midway through some database updates is less
   likely to be handled correctly.

## Mechanics
There are two stages to cancellation: downward propagation of a
`cancel()` call, followed by upwards propagation of a `CancelledError`
out of a blocked `await`.
Both Twisted and asyncio have a cancellation mechanism.

| | Method | Exception | Exception inherits from |
|---------------|---------------------|-----------------------------------------|-------------------------|
| Twisted | `Deferred.cancel()` | `twisted.internet.defer.CancelledError` | `Exception` (!) |
| asyncio | `Task.cancel()` | `asyncio.CancelledError` | `BaseException` |
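
The Twisted row of this table can be exercised in isolation with a few lines of code (a minimal sketch, runnable with only Twisted installed; it is not taken from Synapse itself):

```python
from twisted.internet import defer
from twisted.internet.defer import CancelledError

d: "defer.Deferred[None]" = defer.Deferred()
failures = []
d.addErrback(failures.append)  # capture the Failure instead of letting it propagate

d.cancel()  # a Deferred with no canceller errbacks with a CancelledError

assert failures[0].check(CancelledError)
# The (!) in the table: Twisted's CancelledError is a plain Exception,
# so a bare `except Exception` will catch it.
assert issubclass(CancelledError, Exception)
```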

### Deferred.cancel()
When Synapse starts handling a request, it runs the async method
responsible for handling it using `defer.ensureDeferred`, which returns
a `Deferred`. For example:

```python
def do_something() -> Deferred[None]:
    ...

@cancellable
async def on_GET() -> Tuple[int, JsonDict]:
    d = make_deferred_yieldable(do_something())
    await d
    return 200, {}

request = defer.ensureDeferred(on_GET())
```

When a client disconnects early, Synapse checks for the presence of the
`@cancellable` decorator on `on_GET`. Since `on_GET` is cancellable,
`Deferred.cancel()` is called on the `Deferred` from
`defer.ensureDeferred`, i.e. `request`. Twisted knows which `Deferred`
`request` is waiting on and passes the `cancel()` call on to `d`.

The `Deferred` being waited on, `d`, may have its own handling for
`cancel()` and pass the call on to other `Deferred`s.

Eventually, a `Deferred` handles the `cancel()` call by resolving itself
with a `CancelledError`.
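
For intuition, a `Deferred` that owns some underlying work can supply its own canceller when it is constructed. This sketch (illustrative only, not code from Synapse) cancels a pending timer and then lets Twisted errback the `Deferred` with a `CancelledError`:

```python
from twisted.internet import defer

def sleep_for_30s(reactor) -> "defer.Deferred[None]":
    def canceller(d: "defer.Deferred[None]") -> None:
        # Clean up the underlying work. Because the canceller does not
        # resolve `d` itself, Twisted errbacks it with a CancelledError.
        delayed_call.cancel()

    d: "defer.Deferred[None]" = defer.Deferred(canceller)
    delayed_call = reactor.callLater(30, d.callback, None)
    return d
```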

### CancelledError
The `CancelledError` gets raised out of the `await` and bubbles up, as
per normal Python exception handling.

## Handling cancellation correctly
In general, when writing code that might be subject to cancellation, two
things must be considered:
* The effect of `CancelledError`s raised out of `await`s.
* The effect of `Deferred`s being `cancel()`ed.

Examples of code that handles cancellation incorrectly include:
* `try-except` blocks which swallow `CancelledError`s.
* Code that shares the same `Deferred`, which may be cancelled, between
  multiple requests.
* Code that starts some processing that's exempt from cancellation, but
  uses a logging context from cancellable code. The logging context
  will be finished upon cancellation, while the uncancelled processing
  is still using it.

Some common patterns are listed below in more detail.

### `async` function calls
Most functions in Synapse are relatively straightforward from a
cancellation standpoint: they don't do anything with `Deferred`s and
purely call and `await` other `async` functions.

An `async` function handles cancellation correctly if its own code
handles cancellation correctly and all the `async` functions it calls
handle cancellation correctly. For example:
```python
async def do_two_things() -> None:
    check_something()
    await do_something()
    await do_something_else()
```
`do_two_things` handles cancellation correctly if `do_something` and
`do_something_else` handle cancellation correctly.

That is, when checking whether a function handles cancellation
correctly, its implementation and all its `async` function calls need to
be checked, recursively.

As `check_something` is not `async`, it does not need to be checked.

### CancelledErrors
Because Twisted's `CancelledError`s are `Exception`s, it's easy to
accidentally catch and suppress them. Care must be taken to ensure that
`CancelledError`s are allowed to propagate upwards.

<table width="100%">
<tr>
<td width="50%" valign="top">

**Bad**:
```python
try:
    await do_something()
except Exception:
    # `CancelledError` gets swallowed here.
    logger.info(...)
```
</td>
<td width="50%" valign="top">

**Good**:
```python
try:
    await do_something()
except CancelledError:
    raise
except Exception:
    logger.info(...)
```
</td>
</tr>
<tr>
<td width="50%" valign="top">

**OK**:
```python
try:
    check_something()
    # A `CancelledError` won't ever be raised here.
except Exception:
    logger.info(...)
```
</td>
<td width="50%" valign="top">

**Good**:
```python
try:
    await do_something()
except ValueError:
    logger.info(...)
```
</td>
</tr>
</table>

#### defer.gatherResults
`defer.gatherResults` produces a `Deferred` which:
* broadcasts `cancel()` calls to every `Deferred` being waited on.
* wraps the first exception it sees in a `FirstError`.

Together, this means that `CancelledError`s will be wrapped in
a `FirstError` unless unwrapped. Such `FirstError`s are liable to be
swallowed, so they must be unwrapped.

<table width="100%">
<tr>
<td width="50%" valign="top">

**Bad**:
```python
async def do_something() -> None:
    await make_deferred_yieldable(
        defer.gatherResults([...], consumeErrors=True)
    )

try:
    await do_something()
except CancelledError:
    raise
except Exception:
    # `FirstError(CancelledError)` gets swallowed here.
    logger.info(...)
```

</td>
<td width="50%" valign="top">

**Good**:
```python
async def do_something() -> None:
    await make_deferred_yieldable(
        defer.gatherResults([...], consumeErrors=True)
    ).addErrback(unwrapFirstError)

try:
    await do_something()
except CancelledError:
    raise
except Exception:
    logger.info(...)
```
</td>
</tr>
</table>
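
The wrapping can be observed with plain Twisted, without any Synapse helpers (a minimal sketch; Synapse's `unwrapFirstError` errback essentially re-raises the `subFailure` seen below):

```python
from twisted.internet import defer
from twisted.internet.defer import CancelledError, FirstError

d1: "defer.Deferred[None]" = defer.Deferred()
d2: "defer.Deferred[None]" = defer.Deferred()
gathered = defer.gatherResults([d1, d2], consumeErrors=True)

failures = []
gathered.addErrback(failures.append)

d1.cancel()  # one of the gathered Deferreds fails with a CancelledError

# The gathered Deferred fails with a FirstError wrapper...
assert failures[0].check(FirstError)
# ...and the CancelledError is only reachable through `subFailure`.
assert failures[0].value.subFailure.check(CancelledError)
```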

### Creation of `Deferred`s
If a function creates a `Deferred`, the effect of cancelling it must be considered. `Deferred`s that get shared are likely to have unintended behaviour when cancelled.

<table width="100%">
<tr>
<td width="50%" valign="top">

**Bad**:
```python
cache: Dict[str, Deferred[None]] = {}

def wait_for_room(room_id: str) -> Deferred[None]:
    deferred = cache.get(room_id)
    if deferred is None:
        deferred = Deferred()
        cache[room_id] = deferred
    # `deferred` can have multiple waiters.
    # All of them will observe a `CancelledError`
    # if any one of them is cancelled.
    return make_deferred_yieldable(deferred)

# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
<td width="50%" valign="top">

**Good**:
```python
cache: Dict[str, Deferred[None]] = {}

def wait_for_room(room_id: str) -> Deferred[None]:
    deferred = cache.get(room_id)
    if deferred is None:
        deferred = Deferred()
        cache[room_id] = deferred
    # `deferred` will never be cancelled now.
    # A `CancelledError` will still come out of
    # the `await`.
    # `delay_cancellation` may also be used.
    return make_deferred_yieldable(stop_cancellation(deferred))

# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
</tr>
<tr>
<td width="50%" valign="top">
</td>
<td width="50%" valign="top">

**Good**:
```python
cache: Dict[str, List[Deferred[None]]] = {}

def wait_for_room(room_id: str) -> Deferred[None]:
    if room_id not in cache:
        cache[room_id] = []
    # Each request gets its own `Deferred` to wait on.
    deferred = Deferred()
    cache[room_id].append(deferred)
    return make_deferred_yieldable(deferred)

# Request 1
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
# Request 2
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
```
</td>
</table>

### Uncancelled processing
Some `async` functions may kick off some `async` processing which is
intentionally protected from cancellation, by `stop_cancellation` or
other means. If the `async` processing inherits the logcontext of the
request which initiated it, care must be taken to ensure that the
logcontext is not finished before the `async` processing completes.

<table width="100%">
<tr>
<td width="50%" valign="top">

**Bad**:
```python
cache: Optional[ObservableDeferred[None]] = None

async def do_something_else(
    to_resolve: Deferred[None]
) -> None:
    await ...
    logger.info("done!")
    to_resolve.callback(None)

async def do_something() -> None:
    if not cache:
        to_resolve = Deferred()
        cache = ObservableDeferred(to_resolve)
        # `do_something_else` will never be cancelled and
        # can outlive the `request-1` logging context.
        run_in_background(do_something_else, to_resolve)

    await make_deferred_yieldable(cache.observe())

with LoggingContext("request-1"):
    await do_something()
```
</td>
<td width="50%" valign="top">

**Good**:
```python
cache: Optional[ObservableDeferred[None]] = None

async def do_something_else(
    to_resolve: Deferred[None]
) -> None:
    await ...
    logger.info("done!")
    to_resolve.callback(None)

async def do_something() -> None:
    if not cache:
        to_resolve = Deferred()
        cache = ObservableDeferred(to_resolve)
        run_in_background(do_something_else, to_resolve)
        # We'll wait until `do_something_else` is
        # done before raising a `CancelledError`.
        await make_deferred_yieldable(
            delay_cancellation(cache.observe())
        )
    else:
        await make_deferred_yieldable(cache.observe())

with LoggingContext("request-1"):
    await do_something()
```
</td>
</tr>
<tr>
<td width="50%">

**OK**:
```python
cache: Optional[ObservableDeferred[None]] = None

async def do_something_else(
    to_resolve: Deferred[None]
) -> None:
    await ...
    logger.info("done!")
    to_resolve.callback(None)

async def do_something() -> None:
    if not cache:
        to_resolve = Deferred()
        cache = ObservableDeferred(to_resolve)
        # `do_something_else` will get its own independent
        # logging context. `request-1` will not count any
        # metrics from `do_something_else`.
        run_as_background_process(
            "do_something_else",
            do_something_else,
            to_resolve,
        )

    await make_deferred_yieldable(cache.observe())

with LoggingContext("request-1"):
    await do_something()
```
</td>
<td width="50%">
</td>
</tr>
</table>
@@ -17,6 +17,9 @@ follows:
}
```

Note that the login type of `m.login.jwt` is supported, but is deprecated. This
will be removed in a future version of Synapse.

The `token` field should include the JSON web token with the following claims:

* A claim that encodes the local part of the user ID is required. By default,

@@ -11,29 +11,22 @@ The available spam checker callbacks are:
### `check_event_for_spam`

_First introduced in Synapse v1.37.0_
_Signature extended to support Allow and Code in Synapse v1.60.0_
_Boolean and string return value types deprecated in Synapse v1.60.0_

```python
async def check_event_for_spam(event: "synapse.module_api.EventBase") -> Union["synapse.module_api.ALLOW", "synapse.module_api.error.Codes", str, bool]
async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
```

Called when receiving an event from a client or via federation. The callback must return either:
- `synapse.module_api.ALLOW`, to allow the operation. Other callbacks
  may still decide to reject it.
- `synapse.api.Codes` to reject the operation with an error code. In case
  of doubt, `synapse.api.error.Codes.FORBIDDEN` is a good error code.
- (deprecated) a `str` to reject the operation and specify an error message. Note that clients
  typically will not localize the error message to the user's preferred locale.
- (deprecated) on `False`, behave as `ALLOW`. Deprecated as confusing, as some
  callbacks expect `True` to allow and others `True` to reject.
- (deprecated) on `True`, behave as `synapse.api.error.Codes.FORBIDDEN`. Deprecated as confusing, as
  some callbacks expect `True` to allow and others `True` to reject.
Called when receiving an event from a client or via federation. The callback must return
either:
- an error message string, to indicate the event must be rejected because of spam and
  give a rejection reason to forward to clients;
- the boolean `True`, to indicate that the event is spammy, but not provide further details; or
- the boolean `False`, to indicate that the event is not considered spammy.

If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.ALLOW`, Synapse falls through to the next one. The value of the
first callback that does not return `synapse.module_api.ALLOW` will be used. If this happens, Synapse
will not call any of the subsequent implementations of this callback.
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.

### `user_may_join_room`

@@ -256,24 +249,6 @@ callback returns `False`, Synapse falls through to the next one. The value of th
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.

### `should_drop_federated_event`

_First introduced in Synapse v1.60.0_

```python
async def should_drop_federated_event(event: "synapse.events.EventBase") -> bool
```

Called when checking whether a remote server can federate an event with us. **Returning
`True` from this function will silently drop a federated event and split-brain our view
of a room's DAG, and thus you shouldn't use this callback unless you know what you are
doing.**

If multiple modules implement this callback, they will be considered in order. If a
callback returns `False`, Synapse falls through to the next one. The value of the first
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
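
As an illustration only, a module wiring up this callback might look like the sketch below. The `register_spam_checker_callbacks` registration method is part of the module API; the origin-blocklist logic and its `blocked_origins` config key are invented for the example:

```python
class DropBlockedOriginsExample:
    def __init__(self, config: dict, api):
        # Hypothetical config key: a list of server names whose events
        # should be silently dropped. See the warning above before doing this!
        self._blocked_origins = set(config.get("blocked_origins", []))
        api.register_spam_checker_callbacks(
            should_drop_federated_event=self.should_drop_federated_event,
        )

    async def should_drop_federated_event(self, event) -> bool:
        # The sender's server name is everything after the first colon
        # in the user ID (e.g. "@user:example.com" -> "example.com").
        origin = event.sender.split(":", 1)[1]
        return origin in self._blocked_origins
```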

## Example

The example below is a module that implements the spam checker callback

@@ -159,7 +159,7 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
oidc_providers:
  - idp_id: keycloak
    idp_name: "My KeyCloak server"
    issuer: "https://127.0.0.1:8443/realms/{realm_name}"
    issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
    client_id: "synapse"
    client_secret: "copy secret generated from above"
    scopes: ["openid", "profile"]
@@ -293,7 +293,7 @@ can be used to retrieve information on the authenticated user. As the Synapse
login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.

1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
1. Create a new OAuth application: https://github.com/settings/applications/new.
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.

Synapse config:
@@ -322,10 +322,10 @@ oidc_providers:

[Google][google-idp] is an OpenID certified authentication and authorisation provider.

1. Set up a project in the Google API Console (see
   [documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
4. Copy the Client ID and Client Secret, and add the following to your synapse config:
1. Set up a project in the Google API Console (see
   https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup).
2. Add an "OAuth Client ID" for a Web Application under "Credentials".
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
```yaml
oidc_providers:
  - idp_id: google
@@ -501,8 +501,8 @@ As well as the private key file, you will need:
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.

[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
has more information on setting up SiWA.
https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
documentation on setting up SiWA.

The synapse config will look like this:

@@ -535,8 +535,8 @@ needed to add OAuth2 capabilities to your Django projects. It supports

Configuration on Django's side:

1. Add an application: `https://example.com/admin/oauth2_provider/application/add/` and choose parameters like this:
   * `Redirect uris`: `https://synapse.example.com/_synapse/client/oidc/callback`
1. Add an application: https://example.com/admin/oauth2_provider/application/add/ and choose parameters like this:
   * `Redirect uris`: https://synapse.example.com/_synapse/client/oidc/callback
   * `Client type`: `Confidential`
   * `Authorization grant type`: `Authorization code`
   * `Algorithm`: `HMAC with SHA-2 256`

@@ -35,8 +35,3 @@ See [the TCP replication documentation](tcp_replication.md).
There are read-only versions of the synapse storage layer in
`synapse/replication/slave/storage` that use the response of the
replication API to invalidate their caches.

### The TCP Replication Module
Information about how the tcp replication module is structured, including how
the classes interact, can be found in
`synapse/replication/tcp/__init__.py`

@@ -289,7 +289,7 @@ presence:
# federation: the server-server API (/_matrix/federation). Also implies
#   'media', 'keys', 'openid'
#
# keys: the key discovery API (/_matrix/key).
# keys: the key discovery API (/_matrix/keys).
#
# media: the media API (/_matrix/media).
#
@@ -407,11 +407,6 @@ manhole_settings:
# sign up in a short space of time never to return after their initial
# session.
#
# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
# applies a different trial number if the user was registered by an appservice.
# A value of 0 means no trial days are applied. Appservices not listed in this
# dictionary use the value of `mau_trial_days` instead.
#
# 'mau_limit_alerting' is a means of limiting client side alerting
# should the mau limit be reached. This is useful for small instances
# where the admin has 5 mau seats (say) for 5 specific people and no
@@ -422,8 +417,6 @@ manhole_settings:
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: false
#mau_appservice_trial_days:
#  "appservice-id": 1

# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
@@ -716,11 +709,11 @@ retention:
#
#allow_profile_lookup_over_federation: false

# Uncomment to allow device display name lookup over federation. By default, the
# Federation API prevents other homeservers from obtaining the display names of
# user devices on this homeserver. Defaults to 'false'.
# Uncomment to disable device display name lookup over federation. By default, the
# Federation API allows other homeservers to obtain device display names of any user
# on this homeserver. Defaults to 'true'.
#
#allow_device_name_lookup_over_federation: true
#allow_device_name_lookup_over_federation: false


## Caching ##
@@ -730,12 +723,6 @@ retention:
# A cache 'factor' is a multiplier that can be applied to each of
# Synapse's caches in order to increase or decrease the maximum
# number of entries that can be stored.
#
# The configuration for cache factors (caches.global_factor and
# caches.per_cache_factors) can be reloaded while the application is running,
# by sending a SIGHUP signal to the Synapse process. Changes to other parts of
# the caching config will NOT be applied after a SIGHUP is received; a restart
# is necessary.

# The number of events to cache in memory. Not affected by
# caches.global_factor.
@@ -784,24 +771,6 @@ caches:
#
#cache_entry_ttl: 30m

# This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
# `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
# a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
# this option, and all three of the options must be specified for this feature to work.
#cache_autotuning:
# This flag sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
# They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
# the flag below, or until the `min_cache_ttl` is hit.
#max_cache_memory_usage: 1024M

# This flag sets a rough target for the desired memory usage of the caches.
#target_cache_memory_usage: 758M

# `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
# caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
# from being emptied while Synapse is evicting due to memory.
#min_cache_ttl: 5m

# Controls how long the results of a /sync request are cached for after
# a successful response is returned. A higher duration can help clients with
# intermittent connections, at the cost of higher memory usage.
@@ -2486,40 +2455,6 @@ push:
#
#encryption_enabled_by_default_for_room_type: invite

# Override the default power levels for rooms created on this server, per
# room creation preset.
#
# The appropriate dictionary for the room preset will be applied on top
# of the existing power levels content.
#
# Useful if you know that your users need special permissions in rooms
# that they create (e.g. to send particular types of state events without
# needing an elevated power level). This takes the same shape as the
# `power_level_content_override` parameter in the /createRoom API, but
# is applied before that parameter.
#
# Valid keys are some or all of `private_chat`, `trusted_private_chat`
# and `public_chat`. Inside each of those should be any of the
# properties allowed in `power_level_content_override` in the
# /createRoom API. If any property is missing, its default value will
# continue to be used. If any property is present, it will overwrite
# the existing default completely (so if the `events` property exists,
# the default event power levels will be ignored).
#
#default_power_level_content_override:
#   private_chat:
#       "events":
#           "com.example.myeventtype" : 0
#           "m.room.avatar": 50
#           "m.room.canonical_alias": 50
#           "m.room.encryption": 100
#           "m.room.history_visibility": 100
#           "m.room.name": 50
#           "m.room.power_levels": 100
#           "m.room.server_acl": 100
#           "m.room.tombstone": 100
#       "events_default": 1


# Uncomment to allow non-server-admin users to create groups on this server
#

@@ -62,6 +62,13 @@ loggers:
        # information such as access tokens.
        level: INFO

    twisted:
        # We send the twisted logging directly to the file handler,
        # to work around https://github.com/matrix-org/synapse/issues/3471
        # when using "buffer" logger. Use "console" to log to stderr instead.
        handlers: [file]
        propagate: false

root:
    level: INFO


164
docs/upgrade.md
@@ -89,169 +89,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.60.0

## Adding a new unique index to `state_group_edges` could fail if your database is corrupted

This release of Synapse will add a unique index to the `state_group_edges` table, in order
to prevent accidentally introducing duplicate information (for example, because a database
backup was restored multiple times).

Duplicate rows being present in this table could cause drastic performance problems; see
[issue 11779](https://github.com/matrix-org/synapse/issues/11779) for more details.

If your Synapse database already has had duplicate rows introduced into this table,
this could fail, with either of these errors:


**On Postgres:**
```
synapse.storage.background_updates - 623 - INFO - background_updates-0 - Adding index state_group_edges_unique_idx to state_group_edges
synapse.storage.background_updates - 282 - ERROR - background_updates-0 - Error doing update
...
psycopg2.errors.UniqueViolation: could not create unique index "state_group_edges_unique_idx"
DETAIL:  Key (state_group, prev_state_group)=(2, 1) is duplicated.
```
(The numbers may be different.)

**On SQLite:**
```
synapse.storage.background_updates - 623 - INFO - background_updates-0 - Adding index state_group_edges_unique_idx to state_group_edges
synapse.storage.background_updates - 282 - ERROR - background_updates-0 - Error doing update
...
sqlite3.IntegrityError: UNIQUE constraint failed: state_group_edges.state_group, state_group_edges.prev_state_group
```


<details>
<summary><b>Expand this section for steps to resolve this problem</b></summary>

### On Postgres

Connect to your database with `psql`.

```sql
BEGIN;
DELETE FROM state_group_edges WHERE (ctid, state_group, prev_state_group) IN (
  SELECT row_id, state_group, prev_state_group
  FROM (
    SELECT
      ctid AS row_id,
      MIN(ctid) OVER (PARTITION BY state_group, prev_state_group) AS min_row_id,
      state_group,
      prev_state_group
    FROM state_group_edges
  ) AS t1
  WHERE row_id <> min_row_id
);
COMMIT;
```


### On SQLite

At the command-line, use `sqlite3 path/to/your-homeserver-database.db`:

```sql
BEGIN;
DELETE FROM state_group_edges WHERE (rowid, state_group, prev_state_group) IN (
  SELECT row_id, state_group, prev_state_group
  FROM (
    SELECT
      rowid AS row_id,
      MIN(rowid) OVER (PARTITION BY state_group, prev_state_group) AS min_row_id,
      state_group,
      prev_state_group
    FROM state_group_edges
  )
  WHERE row_id <> min_row_id
);
COMMIT;
```


### For more details

[This comment on issue 11779](https://github.com/matrix-org/synapse/issues/11779#issuecomment-1131545970)
has queries that can be used to check a database for this problem in advance.
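
For instance, a check along the following lines (an illustrative sketch, not necessarily the exact query from that comment) lists any duplicated pairs; an empty result means the index can be created safely:

```sql
SELECT state_group, prev_state_group, COUNT(*) AS copies
FROM state_group_edges
GROUP BY state_group, prev_state_group
HAVING COUNT(*) > 1;
```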

</details>

## SpamChecker API's `check_event_for_spam` has a new signature.

The previous signature has been deprecated.

Whereas `check_event_for_spam` callbacks used to return `Union[str, bool]`, they should now return `Union["synapse.module_api.Allow", "synapse.module_api.errors.Codes"]`.

This is part of an ongoing refactoring of the SpamChecker API to make it less ambiguous and more powerful.

If your module implements `check_event_for_spam` as follows:

```python
async def check_event_for_spam(event):
    if ...:
        # Event is spam
        return True
    # Event is not spam
    return False
```

you should rewrite it as follows:

```python
async def check_event_for_spam(event):
    if ...:
        # Event is spam, mark it as forbidden (you may use some more precise error
        # code if it is useful).
        return synapse.module_api.errors.Codes.FORBIDDEN
    # Event is not spam, mark it as `ALLOW`.
    return synapse.module_api.ALLOW
```
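
If the module also has to keep working on Synapse versions older than v1.60, one possible approach (a sketch, not an officially documented recipe) is to fall back to the deprecated return values when the new symbols are not importable:

```python
try:
    from synapse.module_api import ALLOW
    from synapse.module_api.errors import Codes
    HAVE_NEW_API = True
except ImportError:  # Synapse < 1.60
    HAVE_NEW_API = False

async def check_event_for_spam(event):
    is_spam = ...  # your existing detection logic
    if HAVE_NEW_API:
        return Codes.FORBIDDEN if is_spam else ALLOW
    # Deprecated boolean convention on older Synapse versions.
    return is_spam
```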

# Upgrading to v1.59.0

## Device name lookup over federation has been disabled by default

The names of user devices are no longer visible to users on other homeservers by default.
Device IDs are unaffected, as these are necessary to facilitate end-to-end encryption.

To re-enable this functionality, set the
[`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation)
homeserver config option to `true`.


## Deprecation of the `synapse.app.appservice` and `synapse.app.user_dir` worker application types

The `synapse.app.appservice` worker application type allowed you to configure a
single worker to use to notify application services of new events, as long
as this functionality was disabled on the main process with `notify_appservices: False`.
Further, the `synapse.app.user_dir` worker application type allowed you to configure
a single worker to be responsible for updating the user directory, as long as this
was disabled on the main process with `update_user_directory: False`.

To unify Synapse's worker types, the `synapse.app.appservice` worker application
type and the `notify_appservices` configuration option have been deprecated.
The `synapse.app.user_dir` worker application type and `update_user_directory`
configuration option have also been deprecated.

To get the same functionality as was provided by the deprecated options, it's now recommended that the `synapse.app.generic_worker`
worker application type is used and that the `notify_appservices_from_worker` and/or
`update_user_directory_from_worker` options are set to the name of a worker.

For the time being, the old options can be used alongside the new options to make
it easier to transition between the two configurations, however please note that:

- the options must not contradict each other (otherwise Synapse won't start); and
- the `notify_appservices` and `update_user_directory` options will be removed in a future release of Synapse.

Please see the [*Notifying Application Services*][v1_59_notify_ases_from] and
[*Updating the User Directory*][v1_59_update_user_dir] sections of the worker
documentation for more information.

[v1_59_notify_ases_from]: workers.md#notifying-application-services
[v1_59_update_user_dir]: workers.md#updating-the-user-directory


# Upgrading to v1.58.0

## Groups/communities feature has been disabled by default
@@ -259,7 +96,6 @@ documentation for more information.
The non-standard groups/communities feature in Synapse has been disabled by default
and will be removed in Synapse v1.61.0.


# Upgrading to v1.57.0

## Changes to database schema for application services

@@ -28,7 +28,7 @@ See the following for how to decode the dense data available from the default lo
| NNNN | Total time waiting for response to DB queries across all parallel DB work from this request |
| OOOO | Count of DB transactions performed |
| PPPP | Response body size |
| QQQQ | Response status code<br/>Suffixed with `!` if the socket was closed before the response was generated.<br/>A `499!` status code indicates that Synapse also cancelled request processing after the socket was closed.<br/> |
| QQQQ | Response status code (prefixed with ! if the socket was closed before the response was generated) |
| RRRR | Request |
| SSSS | User-agent |
| TTTT | Events fetched from DB to service this request (note that this does not include events fetched from the cache) |

@@ -1,10 +1,7 @@
## Some useful SQL queries for Synapse Admins

## Size of full matrix db
```sql
SELECT pg_size_pretty( pg_database_size( 'matrix' ) );
```

`SELECT pg_size_pretty( pg_database_size( 'matrix' ) );`
### Result example:
```
pg_size_pretty
@@ -12,19 +9,39 @@ pg_size_pretty
 6420 MB
(1 row)
```
## Show top 20 larger rooms by state events count
```sql
SELECT r.name, s.room_id, s.current_state_events
FROM room_stats_current s
LEFT JOIN room_stats_state r USING (room_id)
ORDER BY current_state_events DESC
LIMIT 20;
```

and by state_group_events count:
```sql
SELECT rss.name, s.room_id, count(s.room_id) FROM state_groups_state s
LEFT JOIN room_stats_state rss USING (room_id)
GROUP BY s.room_id, rss.name
ORDER BY count(s.room_id) DESC
LIMIT 20;
```
plus same, but with join removed for performance reasons:
```sql
SELECT s.room_id, count(s.room_id) FROM state_groups_state s
GROUP BY s.room_id
ORDER BY count(s.room_id) DESC
LIMIT 20;
```

## Show top 20 larger tables by row count
```sql
SELECT relname, n_live_tup AS "rows"
FROM pg_stat_user_tables
SELECT relname, n_live_tup as rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC
LIMIT 20;
```
This query is quick, but may be very approximate; for an exact number of rows use:
```sql
SELECT COUNT(*) FROM <table_name>;
```

This query is quick, but may be very approximate, for exact number of rows use `SELECT COUNT(*) FROM <table_name>`.
### Result example:
```
state_groups_state - 161687170
@@ -49,19 +66,46 @@ device_lists_stream - 326903
user_directory_search - 316433
```

## Show top 20 rooms by new events count in last 1 day:
```sql
SELECT e.room_id, r.name, COUNT(e.event_id) cnt FROM events e
LEFT JOIN room_stats_state r USING (room_id)
WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000 GROUP BY e.room_id, r.name ORDER BY cnt DESC LIMIT 20;
```

## Show top 20 users on homeserver by sent events (messages) in the last month:
```sql
SELECT user_id, SUM(total_events)
FROM user_stats_historical
WHERE TO_TIMESTAMP(end_ts/1000) AT TIME ZONE 'UTC' > date_trunc('day', now() - interval '1 month')
GROUP BY user_id
ORDER BY SUM(total_events) DESC
LIMIT 20;
```

## Show last 100 messages from needed user, with room names:
```sql
SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json FROM events e
LEFT JOIN event_json j USING (room_id)
LEFT JOIN room_stats_state r USING (room_id)
WHERE sender = '@LOGIN:example.com'
AND e.type = 'm.room.message'
ORDER BY stream_ordering DESC
LIMIT 100;
```

## Show top 20 larger tables by storage size
```sql
SELECT nspname || '.' || relname AS "relation",
    pg_size_pretty(pg_total_relation_size(c.oid)) AS "total_size"
  FROM pg_class c
  LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
    pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
  FROM pg_class C
  LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
  WHERE nspname NOT IN ('pg_catalog', 'information_schema')
    AND c.relkind <> 'i'
    AND C.relkind <> 'i'
    AND nspname !~ '^pg_toast'
  ORDER BY pg_total_relation_size(c.oid) DESC
  ORDER BY pg_total_relation_size(C.oid) DESC
  LIMIT 20;
```

### Result example:
```
public.state_groups_state - 27 GB
@@ -86,93 +130,8 @@ public.device_lists_remote_cache - 124 MB
public.state_group_edges - 122 MB
```

## Show top 20 larger rooms by state events count
You get the same information when you use the
[admin API](../../admin_api/rooms.md#list-room-api)
and set parameter `order_by=state_events`.

```sql
SELECT r.name, s.room_id, s.current_state_events
FROM room_stats_current s
LEFT JOIN room_stats_state r USING (room_id)
ORDER BY current_state_events DESC
LIMIT 20;
```

and by state_group_events count:
```sql
SELECT rss.name, s.room_id, COUNT(s.room_id)
FROM state_groups_state s
LEFT JOIN room_stats_state rss USING (room_id)
GROUP BY s.room_id, rss.name
ORDER BY COUNT(s.room_id) DESC
LIMIT 20;
```

plus same, but with join removed for performance reasons:
```sql
SELECT s.room_id, COUNT(s.room_id)
FROM state_groups_state s
GROUP BY s.room_id
ORDER BY COUNT(s.room_id) DESC
LIMIT 20;
```

## Show top 20 rooms by new events count in last 1 day:
```sql
SELECT e.room_id, r.name, COUNT(e.event_id) cnt
FROM events e
LEFT JOIN room_stats_state r USING (room_id)
WHERE e.origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 day') * 1000
GROUP BY e.room_id, r.name
ORDER BY cnt DESC
LIMIT 20;
```

## Show top 20 users on homeserver by sent events (messages) in the last month:
Caution: this query does not use any indexes, can be slow, and will create load on the database.
```sql
SELECT COUNT(*), sender
FROM events
WHERE (type = 'm.room.encrypted' OR type = 'm.room.message')
  AND origin_server_ts >= DATE_PART('epoch', NOW() - INTERVAL '1 month') * 1000
GROUP BY sender
ORDER BY COUNT(*) DESC
LIMIT 20;
```

## Show last 100 messages from needed user, with room names:
```sql
SELECT e.room_id, r.name, e.event_id, e.type, e.content, j.json
FROM events e
LEFT JOIN event_json j USING (room_id)
LEFT JOIN room_stats_state r USING (room_id)
WHERE sender = '@LOGIN:example.com'
  AND e.type = 'm.room.message'
ORDER BY stream_ordering DESC
LIMIT 100;
```

## Show rooms with names, sorted by events in these rooms

**Sort and order with bash**
```bash
echo "SELECT event_json.room_id, room_stats_state.name FROM event_json, room_stats_state \
WHERE room_stats_state.room_id = event_json.room_id" | psql -d synapse -h localhost -U synapse_user -t \
| sort | uniq -c | sort -n
```
Documentation for `psql` command line parameters: https://www.postgresql.org/docs/current/app-psql.html

**Sort and order with SQL**
```sql
SELECT COUNT(*), event_json.room_id, room_stats_state.name
FROM event_json, room_stats_state
WHERE room_stats_state.room_id = event_json.room_id
GROUP BY event_json.room_id, room_stats_state.name
ORDER BY COUNT(*) DESC
LIMIT 50;
```

`echo "select event_json.room_id,room_stats_state.name from event_json,room_stats_state where room_stats_state.room_id=event_json.room_id" | psql synapse | sort | uniq -c | sort -n`
### Result example:
```
 9459 !FPUfgzXYWTKgIrwKxW:matrix.org | This Week in Matrix
@@ -186,22 +145,12 @@ SELECT COUNT(*), event_json.room_id, room_stats_state.name
```

## Lookup room state info by list of room_id
You get the same information when you use the
[admin API](../../admin_api/rooms.md#room-details-api).
```sql
SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption,
    rsc.joined_members, rsc.local_users_in_room, rss.join_rules
FROM room_stats_state rss
LEFT JOIN room_stats_current rsc USING (room_id)
WHERE room_id IN (
    '!OGEhHVWSdvArJzumhm:matrix.org',
    '!YTvKGNlinIzlkMTVRl:matrix.org'
);
```

## Show users and devices that have not been online for a while
```sql
SELECT user_id, device_id, user_agent, TO_TIMESTAMP(last_seen / 1000) AS "last_seen"
FROM devices
WHERE last_seen < DATE_PART('epoch', NOW() - INTERVAL '3 month') * 1000;
```
SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption, rsc.joined_members, rsc.local_users_in_room, rss.join_rules
FROM room_stats_state rss
LEFT JOIN room_stats_current rsc USING (room_id)
WHERE room_id IN (
    '!OGEhHVWSdvArJzumhm:matrix.org',
    '!YTvKGNlinIzlkMTVRl:matrix.org'
)
```
@@ -23,14 +23,6 @@ followed by a letter. Letters have the following meanings:
For example, setting `redaction_retention_period: 5m` would remove redacted
messages from the database after 5 minutes, rather than 5 months.

In addition, configuration options referring to size use the following suffixes:

* `M` = MiB, or 1,048,576 bytes
* `K` = KiB, or 1024 bytes

For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes
for a user avatar.

### YAML
The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules
apply if you want your config file to be read properly. A few helpful things to know:
@@ -475,13 +467,13 @@ Sub-options for each listener include:

Valid resource names are:

* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies 'media' and 'static'.

* `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more.

* `federation`: the server-server API (/_matrix/federation). Also implies `media`, `keys`, `openid`

* `keys`: the key discovery API (/_matrix/key).
* `keys`: the key discovery API (/_matrix/keys).

* `media`: the media API (/_matrix/media).

@@ -635,20 +627,6 @@ Example configuration:
mau_trial_days: 5
```
---
Config option: `mau_appservice_trial_days`

The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different
trial number if the user was registered by an appservice. A value
of 0 means no trial days are applied. Appservices not listed in this dictionary
use the value of `mau_trial_days` instead.

Example configuration:
```yaml
mau_appservice_trial_days:
  my_appservice_id: 3
  another_appservice_id: 6
```
---
Config option: `mau_limit_alerting`

The option `mau_limit_alerting` is a means of limiting client-side alerting
@@ -1057,13 +1035,13 @@ allow_profile_lookup_over_federation: false
---
Config option: `allow_device_name_lookup_over_federation`

Set this option to true to allow device display name lookup over federation. By default, the
Federation API prevents other homeservers from obtaining the display names of any user devices
Set this option to false to disable device display name lookup over federation. By default, the
Federation API allows other homeservers to obtain device display names of any user
on this homeserver.

Example configuration:
```yaml
allow_device_name_lookup_over_federation: true
allow_device_name_lookup_over_federation: false
```
---
## Caching ##
@@ -1127,22 +1105,7 @@ Caching can be configured through the following sub-options:
  with intermittent connections, at the cost of higher memory usage.
  By default, this is zero, which means that sync responses are not cached
  at all.
* `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and
  `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory
  usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu)
  to utilize this option, and all three of the options must be specified for this feature to work. This option
  defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work
  and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided.
  Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry
  durations.
    * `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
      They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
      the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option.
    * `target_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value
      for this option.
    * `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
      caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
      from being emptied while Synapse is evicting due to memory. There is no default value for this option.


Example configuration:
```yaml
@@ -1150,29 +1113,9 @@ caches:
  global_factor: 1.0
  per_cache_factors:
    get_users_who_share_room_with_user: 2.0
  expire_caches: false
  sync_response_cache_duration: 2m
  cache_autotuning:
    max_cache_memory_usage: 1024M
    target_cache_memory_usage: 758M
    min_cache_ttl: 5m
```

### Reloading cache factors

The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) may be reloaded at any time by sending a
[`SIGHUP`](https://en.wikipedia.org/wiki/SIGHUP) signal to Synapse using e.g.

```commandline
kill -HUP [PID_OF_SYNAPSE_PROCESS]
```

If you are running multiple workers, you must individually update the worker
config file and send this signal to each worker process.

If you're using the [example systemd service](https://github.com/matrix-org/synapse/blob/develop/contrib/systemd/matrix-synapse.service)
file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.

---
## Database ##
Config options related to database settings.
@@ -1207,7 +1150,7 @@ For more information on using Synapse with Postgres,
see [here](../../postgres.md).

Example SQLite configuration:
```yaml
```
database:
  name: sqlite3
  args:
@@ -1215,7 +1158,7 @@ database:
```

Example Postgres configuration:
```yaml
```
database:
  name: psycopg2
  txn_limit: 10000
@@ -1370,20 +1313,6 @@ This option sets ratelimiting how often invites can be sent in a room or to a
specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.

Client requests that invite user(s) when [creating a
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
will count against the `rc_invites.per_room` limit, whereas
client requests to [invite a single user to a
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidinvite)
will count against both the `rc_invites.per_user` and `rc_invites.per_room` limits.

Federation requests to invite a user will count against the `rc_invites.per_user`
limit only, as Synapse presumes ratelimiting by room will be done by the sending server.

The `rc_invites.per_user` limit applies to the *receiver* of the invite, rather than the
sender, meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a single user
cannot *receive* more than a burst of 5 invites at a time.

Example configuration:
```yaml
rc_invites:
@@ -1692,10 +1621,10 @@ Defaults to "en".
Example configuration:
```yaml
url_preview_accept_language:
  - 'en-UK'
  - 'en-US;q=0.9'
  - 'fr;q=0.8'
  - '*;q=0.7'
  - en-UK
  - en-US;q=0.9
  - fr;q=0.8
  - *;q=0.7
```
----
Config option: `oembed`
@@ -3355,32 +3284,6 @@ room_list_publication_rules:
    room_id: "*"
    action: allow
```

---
Config option: `default_power_level_content_override`

The `default_power_level_content_override` option controls the default power
levels for rooms.

Useful if you know that your users need special permissions in rooms
that they create (e.g. to send particular types of state events without
needing an elevated power level). This takes the same shape as the
`power_level_content_override` parameter in the /createRoom API, but
is applied before that parameter.

Note that each key provided inside a preset (for example `events` in the example
below) will overwrite all existing defaults inside that key. So in the example
below, newly-created private_chat rooms will have no rules for any event types
except `com.example.foo`.

Example configuration:
```yaml
default_power_level_content_override:
   private_chat: { "events": { "com.example.foo" : 0 } }
   trusted_private_chat: null
   public_chat: null
```

---
## Opentracing ##
Configuration options related to Opentracing support.
@@ -3481,7 +3384,7 @@ stream_writers:
  typing: worker1
```
---
Config option: `run_background_tasks_on`
Config option: `run_background_task_on`

The worker that is used to run background tasks (e.g. cleaning up expired
data). If not provided this defaults to the main process.

@@ -7,10 +7,10 @@ team.
## Installing and using Synapse

This documentation covers topics for **installation**, **configuration** and
**maintenance** of your Synapse process:
**maintainence** of your Synapse process:

* Learn how to [install](setup/installation.md) and
  [configure](usage/configuration/config_documentation.md) your own instance, perhaps with [Single
  [configure](usage/configuration/index.html) your own instance, perhaps with [Single
  Sign-On](usage/configuration/user_authentication/index.html).

* See how to [upgrade](upgrade.md) between Synapse versions.
@@ -65,7 +65,7 @@ following documentation:

Want to help keep Synapse going but don't know how to code? Synapse is a
[Matrix.org Foundation](https://matrix.org) project. Consider becoming a
supporter on [Liberapay](https://liberapay.com/matrixdotorg),
supportor on [Liberapay](https://liberapay.com/matrixdotorg),
[Patreon](https://patreon.com/matrixdotorg) or through
[PayPal](https://paypal.me/matrixdotorg) via a one-time donation.


@@ -251,8 +251,6 @@ information.
    # Presence requests
    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

    # User directory search requests
    ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

Additionally, the following REST endpoints can be handled for GET requests:

@@ -428,7 +426,7 @@ the shared configuration would include:
run_background_tasks_on: background_worker
```

You might also wish to investigate the `update_user_directory_from_worker` and
You might also wish to investigate the `update_user_directory` and
`media_instance_running_background_jobs` settings.

An example for a dedicated background worker instance:
@@ -437,48 +435,6 @@ An example for a dedicated background worker instance:
{{#include systemd-with-workers/workers/background_worker.yaml}}
```

#### Updating the User Directory

You can designate one generic worker to update the user directory.

Specify its name in the shared configuration as follows:

```yaml
update_user_directory_from_worker: worker_name
```

This work cannot be load-balanced; please ensure the main process is restarted
after setting this option in the shared configuration!

User directory updates allow REST endpoints matching the following regular
expressions to work:

    ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

The above endpoints can be routed to any worker, though you may choose to route
it to the chosen user directory worker.

This style of configuration supersedes the legacy `synapse.app.user_dir`
worker application type.
|
||||
|
||||
|
||||
#### Notifying Application Services
|
||||
|
||||
You can designate one generic worker to send output traffic to Application Services.
|
||||
|
||||
Specify its name in the shared configuration as follows:
|
||||
|
||||
```yaml
|
||||
notify_appservices_from_worker: worker_name
|
||||
```
|
||||
|
||||
This work cannot be load-balanced; please ensure the main process is restarted
|
||||
after setting this option in the shared configuration!
|
||||
|
||||
This style of configuration supersedes the legacy `synapse.app.appservice`
|
||||
worker application type.
|
||||
|
||||
|
||||
### `synapse.app.pusher`
|
||||
|
||||
Handles sending push notifications to sygnal and email. Doesn't handle any
|
||||
@@ -497,9 +453,6 @@ pusher_instances:
|
||||
|
||||
### `synapse.app.appservice`
|
||||
|
||||
**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
|
||||
`notify_appservices_from_worker` option instead.](#notifying-application-services)
|
||||
|
||||
Handles sending output traffic to Application Services. Doesn't handle any
|
||||
REST endpoints itself, but you should set `notify_appservices: False` in the
|
||||
shared configuration file to stop the main synapse sending appservice notifications.
|
||||
@@ -567,9 +520,6 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for
|
||||
|
||||
### `synapse.app.user_dir`
|
||||
|
||||
**Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
|
||||
`update_user_directory_from_worker` option instead.](#updating-the-user-directory)
|
||||
|
||||
Handles searches in the user directory. It can handle REST endpoints matching
|
||||
the following regular expressions:
|
||||
|
||||
|
||||
196
mypy.ini
@@ -10,7 +10,6 @@ warn_unreachable = True
warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True

files =
  docker/,
@@ -28,6 +27,9 @@ exclude = (?x)
  |synapse/storage/databases/__init__.py
  |synapse/storage/databases/main/cache.py
  |synapse/storage/databases/main/devices.py
  |synapse/storage/databases/main/event_federation.py
  |synapse/storage/databases/main/push_rule.py
  |synapse/storage/databases/main/roommember.py
  |synapse/storage/schema/

  |tests/api/test_auth.py
@@ -41,11 +43,16 @@ exclude = (?x)
  |tests/events/test_utils.py
  |tests/federation/test_federation_catch_up.py
  |tests/federation/test_federation_sender.py
  |tests/federation/test_federation_server.py
  |tests/federation/transport/test_knocking.py
  |tests/federation/transport/test_server.py
  |tests/handlers/test_typing.py
  |tests/http/federation/test_matrix_federation_agent.py
  |tests/http/federation/test_srv_resolver.py
  |tests/http/test_fedclient.py
  |tests/http/test_proxyagent.py
  |tests/http/test_servlet.py
  |tests/http/test_site.py
  |tests/logging/__init__.py
  |tests/logging/test_terse_json.py
  |tests/module_api/test_api.py
@@ -54,9 +61,12 @@ exclude = (?x)
  |tests/push/test_push_rule_evaluator.py
  |tests/rest/client/test_transactions.py
  |tests/rest/media/v1/test_media_storage.py
  |tests/scripts/test_new_matrix_user.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/state/test_v2.py
  |tests/storage/test_base.py
  |tests/storage/test_roommember.py
  |tests/test_metrics.py
  |tests/test_server.py
  |tests/test_state.py
@@ -79,39 +89,131 @@ exclude = (?x)
  |tests/utils.py
  )$

[mypy-synapse._scripts.*]
disallow_untyped_defs = True

[mypy-synapse.api.*]
disallow_untyped_defs = True

[mypy-synapse.app.*]
disallow_untyped_defs = True

[mypy-synapse.appservice.*]
disallow_untyped_defs = True

[mypy-synapse.config.*]
disallow_untyped_defs = True

[mypy-synapse.crypto.*]
disallow_untyped_defs = True

[mypy-synapse.event_auth]
disallow_untyped_defs = True

[mypy-synapse.events.*]
disallow_untyped_defs = True

[mypy-synapse.federation.*]
disallow_untyped_defs = True

[mypy-synapse.federation.transport.client]
disallow_untyped_defs = False

[mypy-synapse.http.client]
disallow_untyped_defs = False
[mypy-synapse.handlers.*]
disallow_untyped_defs = True

[mypy-synapse.http.matrixfederationclient]
disallow_untyped_defs = False
[mypy-synapse.http.server]
disallow_untyped_defs = True

[mypy-synapse.logging.opentracing]
disallow_untyped_defs = False
[mypy-synapse.logging.context]
disallow_untyped_defs = True

[mypy-synapse.logging.scopecontextmanager]
disallow_untyped_defs = False
[mypy-synapse.metrics.*]
disallow_untyped_defs = True

[mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
warn_unused_ignores = False

[mypy-synapse.module_api.*]
disallow_untyped_defs = True

[mypy-synapse.notifier]
disallow_untyped_defs = True

[mypy-synapse.push.*]
disallow_untyped_defs = True

[mypy-synapse.replication.*]
disallow_untyped_defs = True

[mypy-synapse.rest.*]
disallow_untyped_defs = True

[mypy-synapse.server_notices.*]
disallow_untyped_defs = True

[mypy-synapse.state.*]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.account_data]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.client_ips]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.directory]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.e2e_room_keys]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.end_to_end_keys]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.event_push_actions]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.events_bg_updates]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.events_worker]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.room]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.room_batch]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.profile]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.stats]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.state_deltas]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.transactions]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.user_erasure_store]
disallow_untyped_defs = True

[mypy-synapse.storage.util.*]
disallow_untyped_defs = True

[mypy-synapse.streams.*]
disallow_untyped_defs = True

[mypy-synapse.util.*]
disallow_untyped_defs = True

[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False

[mypy-synapse.server]
disallow_untyped_defs = False

[mypy-synapse.storage.database]
disallow_untyped_defs = False

[mypy-tests.*]
disallow_untyped_defs = False

[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True

@@ -139,46 +241,98 @@ disallow_untyped_defs = True
[mypy-authlib.*]
ignore_missing_imports = True

[mypy-bcrypt]
ignore_missing_imports = True

[mypy-canonicaljson]
ignore_missing_imports = True

[mypy-constantly]
ignore_missing_imports = True

[mypy-daemonize]
ignore_missing_imports = True

[mypy-h11]
ignore_missing_imports = True

[mypy-hiredis]
ignore_missing_imports = True

[mypy-hyperlink]
ignore_missing_imports = True

[mypy-ijson.*]
ignore_missing_imports = True

[mypy-importlib_metadata.*]
ignore_missing_imports = True

[mypy-jaeger_client.*]
ignore_missing_imports = True

[mypy-josepy.*]
ignore_missing_imports = True

[mypy-jwt.*]
ignore_missing_imports = True

[mypy-lxml]
ignore_missing_imports = True

[mypy-msgpack]
ignore_missing_imports = True

# Note: WIP stubs available at
# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
[mypy-nacl.*]
ignore_missing_imports = True

[mypy-netaddr]
ignore_missing_imports = True

[mypy-parameterized.*]
ignore_missing_imports = True

[mypy-phonenumbers.*]
ignore_missing_imports = True

[mypy-prometheus_client.*]
ignore_missing_imports = True

[mypy-pymacaroons.*]
ignore_missing_imports = True

[mypy-pympler.*]
ignore_missing_imports = True

[mypy-redbaron.*]
ignore_missing_imports = True

[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True

[mypy-saml2.*]
ignore_missing_imports = True

[mypy-sentry_sdk]
ignore_missing_imports = True

[mypy-service_identity.*]
ignore_missing_imports = True

[mypy-signedjson.*]
ignore_missing_imports = True

[mypy-srvlookup.*]
ignore_missing_imports = True

[mypy-treq.*]
ignore_missing_imports = True

[mypy-twisted.*]
ignore_missing_imports = True

[mypy-zope]
ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True

74
poetry.lock
generated
@@ -572,7 +572,7 @@ python-versions = "*"

[[package]]
name = "mypy"
version = "0.950"
version = "0.931"
description = "Optional static typing for Python"
category = "dev"
optional = false
@@ -580,14 +580,13 @@ python-versions = ">=3.6"

[package.dependencies]
mypy-extensions = ">=0.4.3"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
tomli = ">=1.1.0"
typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
typing-extensions = ">=3.10"

[package.extras]
dmypy = ["psutil (>=4.0)"]
python2 = ["typed-ast (>=1.4.0,<2)"]
reports = ["lxml"]

[[package]]
name = "mypy-extensions"
@@ -599,14 +598,14 @@ python-versions = "*"

[[package]]
name = "mypy-zope"
version = "0.3.7"
version = "0.3.5"
description = "Plugin for mypy to support zope interfaces"
category = "dev"
optional = false
python-versions = "*"

[package.dependencies]
mypy = "0.950"
mypy = "0.931"
"zope.interface" = "*"
"zope.schema" = "*"

@@ -1021,7 +1020,7 @@ jeepney = ">=0.6"

[[package]]
name = "sentry-sdk"
version = "1.5.11"
version = "1.5.7"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1371,7 +1370,7 @@ python-versions = "*"

[[package]]
name = "types-pillow"
version = "9.0.15"
version = "9.0.6"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -1546,7 +1545,7 @@ docs = ["sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]

[extras]
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis", "Pympler"]
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis"]
cache_memory = ["Pympler"]
jwt = ["pyjwt"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
@@ -1562,8 +1561,8 @@ url_preview = ["lxml"]

[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
content-hash = "d39d5ac5d51c014581186b7691999b861058b569084c525523baf70b77f292b1"
python-versions = "^3.7"
content-hash = "3825cef058b8c9f520ef4b7acb92519be95db9a663a61c2e89a5fe431ed55655"

[metadata.files]
attrs = [
@@ -2090,37 +2089,34 @@ msgpack = [
    {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
]
mypy = [
    {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
    {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
    {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
    {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
    {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
    {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
    {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
    {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
    {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
    {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
    {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
    {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
    {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
    {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
    {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
    {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
    {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
    {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
    {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
    {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
    {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
    {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
    {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
    {file = "mypy-0.931-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3c5b42d0815e15518b1f0990cff7a705805961613e701db60387e6fb663fe78a"},
    {file = "mypy-0.931-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c89702cac5b302f0c5d33b172d2b55b5df2bede3344a2fbed99ff96bddb2cf00"},
    {file = "mypy-0.931-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:300717a07ad09525401a508ef5d105e6b56646f7942eb92715a1c8d610149714"},
    {file = "mypy-0.931-cp310-cp310-win_amd64.whl", hash = "sha256:7b3f6f557ba4afc7f2ce6d3215d5db279bcf120b3cfd0add20a5d4f4abdae5bc"},
    {file = "mypy-0.931-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1bf752559797c897cdd2c65f7b60c2b6969ffe458417b8d947b8340cc9cec08d"},
    {file = "mypy-0.931-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4365c60266b95a3f216a3047f1d8e3f895da6c7402e9e1ddfab96393122cc58d"},
    {file = "mypy-0.931-cp36-cp36m-win_amd64.whl", hash = "sha256:1b65714dc296a7991000b6ee59a35b3f550e0073411ac9d3202f6516621ba66c"},
    {file = "mypy-0.931-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e839191b8da5b4e5d805f940537efcaa13ea5dd98418f06dc585d2891d228cf0"},
    {file = "mypy-0.931-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:50c7346a46dc76a4ed88f3277d4959de8a2bd0a0fa47fa87a4cde36fe247ac05"},
    {file = "mypy-0.931-cp37-cp37m-win_amd64.whl", hash = "sha256:d8f1ff62f7a879c9fe5917b3f9eb93a79b78aad47b533911b853a757223f72e7"},
    {file = "mypy-0.931-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f9fe20d0872b26c4bba1c1be02c5340de1019530302cf2dcc85c7f9fc3252ae0"},
    {file = "mypy-0.931-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1b06268df7eb53a8feea99cbfff77a6e2b205e70bf31743e786678ef87ee8069"},
    {file = "mypy-0.931-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8c11003aaeaf7cc2d0f1bc101c1cc9454ec4cc9cb825aef3cafff8a5fdf4c799"},
    {file = "mypy-0.931-cp38-cp38-win_amd64.whl", hash = "sha256:d9d2b84b2007cea426e327d2483238f040c49405a6bf4074f605f0156c91a47a"},
    {file = "mypy-0.931-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff3bf387c14c805ab1388185dd22d6b210824e164d4bb324b195ff34e322d166"},
    {file = "mypy-0.931-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5b56154f8c09427bae082b32275a21f500b24d93c88d69a5e82f3978018a0266"},
    {file = "mypy-0.931-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ca7f8c4b1584d63c9a0f827c37ba7a47226c19a23a753d52e5b5eddb201afcd"},
    {file = "mypy-0.931-cp39-cp39-win_amd64.whl", hash = "sha256:74f7eccbfd436abe9c352ad9fb65872cc0f1f0a868e9d9c44db0893440f0c697"},
    {file = "mypy-0.931-py3-none-any.whl", hash = "sha256:1171f2e0859cfff2d366da2c7092b06130f232c636a3f7301e3feb8b41f6377d"},
    {file = "mypy-0.931.tar.gz", hash = "sha256:0038b21890867793581e4cb0d810829f5fd4441aa75796b53033af3aa30430ce"},
]
mypy-extensions = [
    {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
    {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
]
mypy-zope = [
    {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
    {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
    {file = "mypy-zope-0.3.5.tar.gz", hash = "sha256:489e7da1c2af887f2cfe3496995fc247f296512b495b57817edddda9d22308f3"},
    {file = "mypy_zope-0.3.5-py3-none-any.whl", hash = "sha256:3bd0cc9a3e5933b02931af4b214ba32a4f4ff98adb30c979ce733857db91a18b"},
]
netaddr = [
    {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
@@ -2390,8 +2386,8 @@ secretstorage = [
    {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
]
sentry-sdk = [
    {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
    {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
    {file = "sentry-sdk-1.5.7.tar.gz", hash = "sha256:aa52da941c56b5a76fd838f8e9e92a850bf893a9eb1e33ffce6c21431d07ee30"},
    {file = "sentry_sdk-1.5.7-py2.py3-none-any.whl", hash = "sha256:411a8495bd18cf13038e5749e4710beb4efa53da6351f67b4c2f307c2d9b6d49"},
]
service-identity = [
    {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2626,8 +2622,8 @@ types-opentracing = [
    {file = "types_opentracing-2.4.7-py3-none-any.whl", hash = "sha256:861fb8103b07cf717f501dd400cb274ca9992552314d4d6c7a824b11a215e512"},
]
types-pillow = [
    {file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"},
    {file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"},
    {file = "types-Pillow-9.0.6.tar.gz", hash = "sha256:79b350b1188c080c27558429f1e119e69c9f020b877a82df761d9283070e0185"},
    {file = "types_Pillow-9.0.6-py3-none-any.whl", hash = "sha256:bd1e0a844fc718398aa265bf50fcad550fc520cc54f80e5ffeb7b3226b3cc507"},
]
types-psycopg2 = [
    {file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"},

@@ -54,7 +54,7 @@ skip_gitignore = true

[tool.poetry]
name = "matrix-synapse"
version = "1.60.0rc1"
version = "1.58.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -100,7 +100,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.7.1"
python = "^3.7"

# Mandatory Dependencies
# ----------------------
@@ -142,10 +142,8 @@ netaddr = ">=0.7.18"
# add a lower bound to the Jinja2 dependency.
Jinja2 = ">=3.0"
bleach = ">=1.4.3"
# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
# generic over ParamSpecs.
typing-extensions = ">=3.10.0.1"
# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
typing-extensions = ">=3.10.0"
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
cryptography = ">=3.4.7"
@@ -233,11 +231,10 @@ all = [
    "jaeger-client", "opentracing",
    # jwt
    "pyjwt",
    # redis
    "txredisapi", "hiredis",
    # cache_memory
    "pympler",
    #redis
    "txredisapi", "hiredis"
    # omitted:
    #   - cache_memory: this is an experimental option
    #   - test: it's useful to have this separate from dev deps in the olddeps job
    #   - systemd: this is a system-based requirement
]
@@ -251,8 +248,8 @@ flake8-bugbear = "==21.3.2"
flake8 = "*"

# Typechecking
mypy = "*"
mypy-zope = "*"
mypy = "==0.931"
mypy-zope = "==0.3.5"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"

@@ -43,10 +43,6 @@ fi
# Build the base Synapse image from the local checkout
docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .

extra_test_args=()

test_tags="synapse_blacklist,msc2716,msc3030"

# If we're using workers, modify the docker files slightly.
if [[ -n "$WORKERS" ]]; then
    # Build the workers docker image (from the base Synapse image).
@@ -56,21 +52,10 @@ if [[ -n "$WORKERS" ]]; then
    COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile

    # And provide some more configuration to complement.

    # It can take quite a while to spin up a worker-mode Synapse for the first
    # time (the main problem is that we start 14 python processes for each test,
    # and complement likes to do two of them in parallel).
    export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120

    # ... and it takes longer than 10m to run the whole suite.
    extra_test_args+=("-timeout=60m")
    export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60
else
    export COMPLEMENT_BASE_IMAGE=complement-synapse
    COMPLEMENT_DOCKERFILE=Dockerfile

    # We only test faster room joins on monoliths, because they are purposefully
    # being developed without worker support to start with.
    test_tags="$test_tags,faster_joins"
fi

# Build the Complement image from the Synapse image we just built.
@@ -79,5 +64,4 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"

go test -v -tags $test_tags -count=1 "${extra_test_args[@]}" "$@" ./tests/...
go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "$@" ./tests/...

@@ -21,7 +21,7 @@ from typing import Callable, Optional, Type
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import MethodSigContext, Plugin
from mypy.typeops import bind_self
from mypy.types import CallableType, NoneType, UnionType
from mypy.types import CallableType, NoneType


class SynapsePlugin(Plugin):
@@ -72,20 +72,13 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:

    # Third, we add an optional "on_invalidate" argument.
    #
    # This is either
    # - a callable which accepts no input and returns nothing, or
    # - None.
    calltyp = UnionType(
        [
            NoneType(),
            CallableType(
                arg_types=[],
                arg_kinds=[],
                arg_names=[],
                ret_type=NoneType(),
                fallback=ctx.api.named_generic_type("builtins.function", []),
            ),
        ]
    # This is a callable which accepts no input and returns nothing.
    calltyp = CallableType(
        arg_types=[],
        arg_kinds=[],
        arg_names=[],
        ret_type=NoneType(),
        fallback=ctx.api.named_generic_type("builtins.function", []),
    )

    arg_types.append(calltyp)
@@ -102,7 +95,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:


def plugin(version: str) -> Type[SynapsePlugin]:
    # This is the entry point of the plugin, and lets us deal with the fact
    # This is the entry point of the plugin, and let's us deal with the fact
    # that the mypy plugin interface is *not* stable by looking at the version
    # string.
    #

@@ -89,7 +89,13 @@ def prepare() -> None:
    """

    # Make sure we're in a git repo.
    repo = get_repo_and_check_clean_checkout()
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")

    click.secho("Updating git repo...")
    repo.remote().fetch()
@@ -165,7 +171,9 @@ def prepare() -> None:
    assert not parsed_new_version.is_devrelease
    assert not parsed_new_version.is_postrelease

    release_branch_name = get_release_branch_name(parsed_new_version)
    release_branch_name = (
        f"release-v{parsed_new_version.major}.{parsed_new_version.minor}"
    )
    release_branch = find_ref(repo, release_branch_name)
    if release_branch:
        if release_branch.is_remote():
@@ -266,7 +274,13 @@ def tag(gh_token: Optional[str]) -> None:
    """Tags the release and generates a draft GitHub release"""

    # Make sure we're in a git repo.
    repo = get_repo_and_check_clean_checkout()
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")

    click.secho("Updating git repo...")
    repo.remote().fetch()
@@ -279,15 +293,6 @@ def tag(gh_token: Optional[str]) -> None:
    if tag_name in repo.tags:
        raise click.ClickException(f"Tag {tag_name} already exists!\n")

    # Check we're on the right release branch
    release_branch = get_release_branch_name(current_version)
    if repo.active_branch.name != release_branch:
        click.echo(
            f"Need to be on the release branch ({release_branch}) before tagging. "
            f"Currently on ({repo.active_branch.name})."
        )
        click.get_current_context().abort()

    # Get the appropriate changelogs and tag.
    changes = get_changes_for_version(current_version)

@@ -353,15 +358,21 @@ def tag(gh_token: Optional[str]) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def publish(gh_token: str) -> None:
    """Publish release on GitHub."""
    """Publish release."""

    # Make sure we're in a git repo.
    get_repo_and_check_clean_checkout()
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")

    current_version = get_package_version()
    tag_name = f"v{current_version}"

    if not click.confirm(f"Publish release {tag_name} on GitHub?", default=True):
    if not click.confirm(f"Publish {tag_name}?", default=True):
        return

    # Publish the draft release
@@ -395,13 +406,6 @@ def upload() -> None:
    current_version = get_package_version()
    tag_name = f"v{current_version}"

    # Check we have the right tag checked out.
    repo = get_repo_and_check_clean_checkout()
    tag = repo.tag(f"refs/tags/{tag_name}")
    if repo.head.commit != tag.commit:
        click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
        click.get_current_context().abort()

    pypi_asset_names = [
        f"matrix_synapse-{current_version}-py3-none-any.whl",
        f"matrix-synapse-{current_version}.tar.gz",
@@ -434,7 +438,7 @@ def announce() -> None:
        f"""
Hi everyone. Synapse {current_version} has just been released.

[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \
[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\
[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
[debs](https://packages.matrix.org/debian/) | \
[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""
@@ -465,21 +469,6 @@ def get_package_version() -> version.Version:
    return version.Version(version_string)


def get_release_branch_name(version_number: version.Version) -> str:
    return f"release-v{version_number.major}.{version_number.minor}"


def get_repo_and_check_clean_checkout() -> git.Repo:
    """Get the project repo and check it's not got any uncommitted changes."""
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")
    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")
    return repo


def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
    """Find the branch/ref, looking first locally then in the remote."""
    if ref_name in repo.references:

@@ -85,19 +85,12 @@ class SortedDict(Dict[_KT, _VT]):
    def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
    def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
    def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ...
    # Mypy now reports the first overload as an error, because typeshed widened the type
    # of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in
    # https://github.com/python/typeshed/pull/6653
    # Since sorteddicts don't change the signature of `update` from that of `dict`, we
    # let the stubs for `update` inherit from the stubs for `dict`. (I suspect we could
    # do the same for many other methods.) We leave the stubs commented to better track
    # how this file has evolved from the original stubs.
    # @overload
    # def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    # @overload
    # def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
    # @overload
    # def update(self, **kwargs: _VT) -> None: ...
    @overload
    def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    @overload
    def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
    @overload
    def update(self, **kwargs: _VT) -> None: ...
    def __reduce__(
        self,
    ) -> Tuple[

@@ -46,14 +46,14 @@ def main() -> None:
            "Path to server config file. "
            "Used to read in bcrypt_rounds and password_pepper."
        ),
        required=True,
    )

    args = parser.parse_args()
    config = yaml.safe_load(args.config)
    bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
    password_config = config.get("password_config", None) or {}
    password_pepper = password_config.get("pepper", password_pepper)
    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
        password_config = config.get("password_config", None) or {}
        password_pepper = password_config.get("pepper", password_pepper)
    password = args.password

    if not password:

@@ -187,7 +187,7 @@ class Auth:
        Once get_user_by_req has set up the opentracing span, this does the actual work.
        """
        try:
            ip_addr = request.getClientAddress().host
            ip_addr = request.getClientIP()
            user_agent = get_request_user_agent(request)

            access_token = self.get_access_token_from_request(request)
@@ -356,7 +356,7 @@ class Auth:
            return None, None, None

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(request.getClientAddress().host)
            ip_address = IPAddress(request.getClientIP())
            if ip_address not in app_service.ip_range_whitelist:
                return None, None, None

@@ -417,8 +417,7 @@ class Auth:
        """

        if rights == "access":
            # First look in the database to see if the access token is present
            # as an opaque token.
            # first look in the database
            r = await self.store.get_user_by_access_token(token)
            if r:
                valid_until_ms = r.valid_until_ms
@@ -435,8 +434,7 @@ class Auth:

            return r

        # If the token isn't found in the database, then it could still be a
        # macaroon, so we check that here.
        # otherwise it needs to be a valid macaroon
        try:
            user_id, guest = self._parse_and_validate_macaroon(token, rights)

@@ -484,12 +482,8 @@ class Auth:
            TypeError,
            ValueError,
        ) as e:
            logger.warning(
                "Invalid access token in auth: %s %s.",
                type(e),
                e,
            )
            raise InvalidClientTokenError("Invalid access token passed.")
            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
            raise InvalidClientTokenError("Invalid macaroon passed.")

    def _parse_and_validate_macaroon(
        self, token: str, rights: str = "access"
@@ -510,7 +504,10 @@ class Auth:
        try:
            macaroon = pymacaroons.Macaroon.deserialize(token)
        except Exception:  # deserialize can throw more-or-less anything
            # The access token doesn't look like a macaroon.
            # doesn't look like a macaroon: treat it as an opaque token which
            # must be in the database.
            # TODO: it would be nice to get rid of this, but apparently some
            # people use access tokens which aren't macaroons
            raise _InvalidMacaroonException()

        try:

@@ -65,8 +65,6 @@ class JoinRules:
    PRIVATE: Final = "private"
    # As defined for MSC3083.
    RESTRICTED: Final = "restricted"
    # As defined for MSC3787.
    KNOCK_RESTRICTED: Final = "knock_restricted"


class RestrictedJoinRuleTypes:
@@ -257,5 +255,7 @@ class GuestAccess:

class ReceiptTypes:
    READ: Final = "m.read"
    READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
    FULLY_READ: Final = "m.fully_read"


class ReadReceiptEventFields:
    MSC2285_HIDDEN: Final = "org.matrix.msc2285.hidden"

@@ -17,7 +17,6 @@

import logging
import typing
from enum import Enum
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Union

@@ -31,11 +30,7 @@ if typing.TYPE_CHECKING:
logger = logging.getLogger(__name__)


class Codes(str, Enum):
    """
    All known error codes, as an enum of strings.
    """

class Codes:
    UNRECOGNIZED = "M_UNRECOGNIZED"
    UNAUTHORIZED = "M_UNAUTHORIZED"
    FORBIDDEN = "M_FORBIDDEN"
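Because the enum-based class derives from both `str` and `Enum`, its members still compare equal to their plain-string values, so callers that treat the codes as ordinary strings are unaffected. A small self-contained illustration (not part of the diff):

```python
from enum import Enum

class Codes(str, Enum):
    FORBIDDEN = "M_FORBIDDEN"

# A str-mixin enum member compares equal to its underlying string,
# and .value recovers the wire-format error code unchanged.
assert Codes.FORBIDDEN == "M_FORBIDDEN"
assert Codes.FORBIDDEN.value == "M_FORBIDDEN"
```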

@@ -19,7 +19,6 @@ from typing import (
    TYPE_CHECKING,
    Awaitable,
    Callable,
    Collection,
    Dict,
    Iterable,
    List,
@@ -445,9 +444,9 @@ class Filter:
        return room_ids

    async def _check_event_relations(
        self, events: Collection[FilterEvent]
        self, events: Iterable[FilterEvent]
    ) -> List[FilterEvent]:
        # The event IDs to check, mypy doesn't understand the isinstance check.
        # The event IDs to check, mypy doesn't understand the ifinstance check.
        event_ids = [event.event_id for event in events if isinstance(event, EventBase)]  # type: ignore[attr-defined]
        event_ids_to_keep = set(
            await self._store.events_have_relations(

@@ -81,9 +81,6 @@ class RoomVersion:
    msc2716_historical: bool
    # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
    msc2716_redactions: bool
    # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
    # knocks and restricted join rules into the same join condition.
    msc3787_knock_restricted_join_rule: bool


class RoomVersions:
@@ -102,7 +99,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V2 = RoomVersion(
        "2",
@@ -119,7 +115,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V3 = RoomVersion(
        "3",
@@ -136,7 +131,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V4 = RoomVersion(
        "4",
@@ -153,7 +147,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V5 = RoomVersion(
        "5",
@@ -170,7 +163,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V6 = RoomVersion(
        "6",
@@ -187,7 +179,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@@ -204,7 +195,6 @@ class RoomVersions:
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V7 = RoomVersion(
        "7",
@@ -221,7 +211,6 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V8 = RoomVersion(
        "8",
@@ -238,7 +227,6 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    V9 = RoomVersion(
        "9",
@@ -255,7 +243,6 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
    )
    MSC2716v3 = RoomVersion(
        "org.matrix.msc2716v3",
@@ -272,24 +259,6 @@ class RoomVersions:
        msc2403_knocking=True,
        msc2716_historical=True,
        msc2716_redactions=True,
        msc3787_knock_restricted_join_rule=False,
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.V3,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
    )


@@ -307,7 +276,6 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
        RoomVersions.V8,
        RoomVersions.V9,
        RoomVersions.MSC2716v3,
        RoomVersions.MSC3787,
    )
}

@@ -38,7 +38,6 @@ from typing import (

from cryptography.utils import CryptographyDeprecationWarning
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import ParamSpec

import twisted
from twisted.internet import defer, error, reactor as _reactor
@@ -49,12 +48,9 @@ from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool

import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ManholeConfig
from synapse.crypto import context_factory
@@ -85,12 +81,11 @@ logger = logging.getLogger(__name__)

# list of tuples of function, args list, kwargs dict
_sighup_callbacks: List[
    Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
    Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]
] = []
P = ParamSpec("P")


def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> None:
    """
    Register a function to be called when a SIGHUP occurs.

@@ -98,9 +93,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
        func: Function to be called when sent a SIGHUP signal.
        *args, **kwargs: args and kwargs to be passed to the target function.
    """
    # This type-ignore should be redundant once we use a mypy release with
    # https://github.com/python/mypy/pull/12668.
    _sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
    _sighup_callbacks.append((func, args, kwargs))
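For context, the `ParamSpec`-typed variant being removed on this branch lets mypy check the `*args`/`**kwargs` passed to `register_sighup` against the callback's own signature, which `Callable[..., None]` cannot. A minimal self-contained sketch, with a hypothetical callback:

```python
from typing import Any, Callable, Dict, List, Tuple
from typing_extensions import ParamSpec

P = ParamSpec("P")
_sighup_callbacks: List[Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]] = []

def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
    # Arguments are validated against func's signature at type-check time.
    _sighup_callbacks.append((func, args, kwargs))

def reload_certs(path: str, strict: bool = False) -> None:  # hypothetical callback
    ...

register_sighup(reload_certs, "/etc/synapse/tls.pem", strict=True)  # accepted
# register_sighup(reload_certs, strict="yes")  # rejected by mypy: missing/incompatible arguments
```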


def start_worker_reactor(
@@ -221,9 +214,7 @@ def redirect_stdio_to_logs() -> None:
    print("Redirected stdout/stderr to logs")


def register_start(
    cb: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
) -> None:
def register_start(cb: Callable[..., Awaitable], *args: Any, **kwargs: Any) -> None:
    """Register a callback with the reactor, to be called once it is running

    This can be used to initialise parts of the system which require an asynchronous
@@ -435,10 +426,6 @@ async def start(hs: "HomeServer") -> None:
        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)
        register_sighup(reload_cache_config, hs.config)

        # Apply the cache config.
        hs.config.caches.resize_all_caches()

    # Load the certificate from disk.
    refresh_certificate(hs)
@@ -493,43 +480,6 @@ async def start(hs: "HomeServer") -> None:
        atexit.register(gc.freeze)


def reload_cache_config(config: HomeServerConfig) -> None:
    """Reload cache config from disk and immediately apply it, resizing caches accordingly.

    If the config is invalid, a `ConfigError` is logged and no changes are made.

    Otherwise, this:
        - replaces the `caches` section on the given `config` object,
        - resizes all caches according to the new cache factors, and

    Note that the following cache config keys are read, but not applied:
        - event_cache_size: used to set a max_size and _original_max_size on
          EventsWorkerStore._get_event_cache when it is created. We'd have to update
          the _original_max_size (and maybe
        - sync_response_cache_duration: would have to update the timeout_sec attribute on
          HomeServer -> SyncHandler -> ResponseCache.
        - track_memory_usage. This affects synapse.util.caches.TRACK_MEMORY_USAGE which
          influences Synapse's self-reported metrics.

    Also, the HTTPConnectionPool in SimpleHTTPClient sets its maxPersistentPerHost
    parameter based on the global_factor. This won't be applied on a config reload.
    """
    try:
        previous_cache_config = config.reload_config_section("caches")
    except ConfigError as e:
        logger.warning("Failed to reload cache config")
        for f in format_config_error(e):
            logger.warning(f)
    else:
        logger.debug(
            "New cache config. Was:\n %s\nNow:\n %s",
            previous_cache_config.__dict__,
            config.caches.__dict__,
        )
        synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
        config.caches.resize_all_caches()


def setup_sentry(hs: "HomeServer") -> None:
    """Enable sentry integration, if enabled in configuration"""


@@ -210,7 +210,7 @@ def start(config_options: List[str]) -> None:
    config.logging.no_redirect_stdio = True

    # Explicitly disable background processes
    config.worker.should_update_user_directory = False
    config.server.update_user_directory = False
    config.worker.run_background_tasks = False
    config.worker.start_pushers = False
    config.worker.pusher_shard_config.instances = []

@@ -441,6 +441,38 @@ def start(config_options: List[str]) -> None:
        "synapse.app.user_dir",
    )

    if config.worker.worker_app == "synapse.app.appservice":
        if config.appservice.notify_appservices:
            sys.stderr.write(
                "\nThe appservices must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``notify_appservices: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the appservice to start since they will be disabled in the main config
        config.appservice.notify_appservices = True
    else:
        # For other worker types we force this to off.
        config.appservice.notify_appservices = False

    if config.worker.worker_app == "synapse.app.user_dir":
        if config.server.update_user_directory:
            sys.stderr.write(
                "\nThe update_user_directory must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``update_user_directory: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the pushers to start since they will be disabled in the main config
        config.server.update_user_directory = True
    else:
        # For other worker types we force this to off.
        config.server.update_user_directory = False

    synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage


@@ -16,7 +16,7 @@
import logging
import os
import sys
from typing import Dict, Iterable, List
from typing import Dict, Iterable, Iterator, List

from matrix_common.versionstring import get_distribution_version_string

@@ -45,7 +45,7 @@ from synapse.app._base import (
    redirect_stdio_to_logs,
    register_start,
)
from synapse.config._base import ConfigError, format_config_error
from synapse.config._base import ConfigError
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig
@@ -399,6 +399,38 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
    return hs


def format_config_error(e: ConfigError) -> Iterator[str]:
    """
    Formats a config error neatly

    The idea is to format the immediate error, plus the "causes" of those errors,
    hopefully in a way that makes sense to the user. For example:

        Error in configuration at 'oidc_config.user_mapping_provider.config.display_name_template':
          Failed to parse config for module 'JinjaOidcMappingProvider':
            invalid jinja template:
              unexpected end of template, expected 'end of print statement'.

    Args:
        e: the error to be formatted

    Returns: An iterator which yields string fragments to be formatted
    """
    yield "Error in configuration"

    if e.path:
        yield " at '%s'" % (".".join(e.path),)

    yield ":\n  %s" % (e.msg,)

    parent_e = e.__cause__
    indent = 1
    while parent_e:
        indent += 1
        yield ":\n%s%s" % ("  " * indent, str(parent_e))
        parent_e = parent_e.__cause__


def run(hs: HomeServer) -> None:
    _base.start_reactor(
        "synapse-homeserver",

@@ -17,7 +17,6 @@ import urllib.parse
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple

from prometheus_client import Counter
from typing_extensions import TypeGuard

from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
@@ -67,7 +66,7 @@ def _is_valid_3pe_metadata(info: JsonDict) -> bool:
    return True


def _is_valid_3pe_result(r: object, field: str) -> TypeGuard[JsonDict]:
def _is_valid_3pe_result(r: JsonDict, field: str) -> bool:
    if not isinstance(r, dict):
        return False
|
||||
|
||||
|
||||
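The `TypeGuard` annotation in the first signature is what lets mypy narrow `r` from `object` to `JsonDict` once the check succeeds. A minimal, self-contained illustration of the pattern (function names are hypothetical):

```python
from typing import List

from typing_extensions import TypeGuard

from synapse.types import JsonDict

def _looks_like_json_dict(r: object) -> TypeGuard[JsonDict]:
    # Returning True tells the type checker that `r` may be treated as a
    # JsonDict from here on, without an explicit cast.
    return isinstance(r, dict)

def keep_valid(results: List[object]) -> List[JsonDict]:
    # Inside the comprehension, each `r` is narrowed from object to JsonDict.
    return [r for r in results if _looks_like_json_dict(r)]
```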
@@ -16,18 +16,14 @@

import argparse
import errno
import logging
import os
from collections import OrderedDict
from hashlib import sha256
from textwrap import dedent
from typing import (
Any,
ClassVar,
Collection,
Dict,
Iterable,
Iterator,
List,
MutableMapping,
Optional,
@@ -44,8 +40,6 @@ import yaml

from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter

logger = logging.getLogger(__name__)


class ConfigError(Exception):
"""Represents a problem parsing the configuration
@@ -61,38 +55,6 @@ class ConfigError(Exception):
self.path = path


def format_config_error(e: ConfigError) -> Iterator[str]:
"""
Formats a config error neatly

The idea is to format the immediate error, plus the "causes" of those errors,
hopefully in a way that makes sense to the user. For example:

Error in configuration at 'oidc_config.user_mapping_provider.config.display_name_template':
Failed to parse config for module 'JinjaOidcMappingProvider':
invalid jinja template:
unexpected end of template, expected 'end of print statement'.

Args:
e: the error to be formatted

Returns: An iterator which yields string fragments to be formatted
"""
yield "Error in configuration"

if e.path:
yield " at '%s'" % (".".join(e.path),)

yield ":\n %s" % (e.msg,)

parent_e = e.__cause__
indent = 1
while parent_e:
indent += 1
yield ":\n%s%s" % (" " * indent, str(parent_e))
parent_e = parent_e.__cause__


# We split these messages out to allow packages to override with package
# specific instructions.
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
@@ -157,7 +119,7 @@ class Config:
defined in subclasses.
"""

section: ClassVar[str]
section: str

def __init__(self, root_config: "RootConfig" = None):
self.root = root_config
@@ -347,12 +309,9 @@ class RootConfig:
class, lower-cased and with "Config" removed.
"""

config_classes: List[Type[Config]] = []

def __init__(self, config_files: Collection[str] = ()):
# Capture absolute paths here, so we can reload config after we daemonize.
self.config_files = [os.path.abspath(path) for path in config_files]
config_classes = []

def __init__(self):
for config_class in self.config_classes:
if config_class.section is None:
raise ValueError("%r requires a section name" % (config_class,))
@@ -553,10 +512,12 @@ class RootConfig:
object from parser.parse_args(..)`
"""

obj = cls()

config_args = parser.parse_args(argv)

config_files = find_config_files(search_paths=config_args.config_path)
obj = cls(config_files)

if not config_files:
parser.error("Must supply a config file.")

@@ -666,7 +627,7 @@ class RootConfig:

generate_missing_configs = config_args.generate_missing_configs

obj = cls(config_files)
obj = cls()

if config_args.generate_config:
if config_args.report_stats is None:
@@ -766,34 +727,6 @@ class RootConfig:
) -> None:
self.invoke_all("generate_files", config_dict, config_dir_path)

def reload_config_section(self, section_name: str) -> Config:
"""Reconstruct the given config section, leaving all others unchanged.

This works in three steps:

1. Create a new instance of the relevant `Config` subclass.
2. Call `read_config` on that instance to parse the new config.
3. Replace the existing config instance with the new one.

:raises ValueError: if the given `section` does not exist.
:raises ConfigError: for any other problems reloading config.

:returns: the previous config object, which no longer has a reference to this
RootConfig.
"""
existing_config: Optional[Config] = getattr(self, section_name, None)
if existing_config is None:
raise ValueError(f"Unknown config section '{section_name}'")
logger.info("Reloading config section '%s'", section_name)

new_config_data = read_config_files(self.config_files)
new_config = type(existing_config)(self)
new_config.read_config(new_config_data)
setattr(self, section_name, new_config)

existing_config.root = None
return existing_config
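A sketch of how a caller might drive this reload path, for example from a SIGHUP handler (the handler itself is hypothetical; `reload_config_section` and `resize_all_caches` are the methods shown in this diff):

```python
import logging

logger = logging.getLogger(__name__)

def reload_cache_config(root_config: "RootConfig") -> None:
    # Re-read the files captured in __init__ and swap in a fresh "caches"
    # section; the old section object is returned and simply dropped here.
    try:
        root_config.reload_config_section("caches")
    except ConfigError:
        logger.exception("Failed to reload cache config; keeping old values")
        return
    # Apply the newly loaded factors to all registered caches.
    root_config.caches.resize_all_caches()
```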
def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]:
"""Read the config files into a dict

@@ -1,19 +1,15 @@
import argparse
from typing import (
Any,
Collection,
Dict,
Iterable,
Iterator,
List,
Literal,
MutableMapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)

import jinja2
@@ -68,8 +64,6 @@ class ConfigError(Exception):
self.msg = msg
self.path = path

def format_config_error(e: ConfigError) -> Iterator[str]: ...

MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS: str
MISSING_REPORT_STATS_SPIEL: str
MISSING_SERVER_NAME: str
@@ -123,8 +117,7 @@ class RootConfig:
background_updates: background_updates.BackgroundUpdateConfig

config_classes: List[Type["Config"]] = ...
config_files: List[str]
def __init__(self, config_files: Collection[str] = ...) -> None: ...
def __init__(self) -> None: ...
def invoke_all(
self, func_name: str, *args: Any, **kwargs: Any
) -> MutableMapping[str, Any]: ...
@@ -164,12 +157,6 @@ class RootConfig:
def generate_missing_files(
self, config_dict: dict, config_dir_path: str
) -> None: ...
@overload
def reload_config_section(
self, section_name: Literal["caches"]
) -> cache.CacheConfig: ...
@overload
def reload_config_section(self, section_name: str) -> Config: ...
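The paired `@overload` stubs above give mypy a precise return type when the section name is the literal `"caches"`, while any other name falls back to the base `Config`. A minimal, self-contained version of the same pattern:

```python
from typing import overload

from typing_extensions import Literal

class Config: ...
class CacheConfig(Config): ...

class Registry:
    @overload
    def reload(self, name: Literal["caches"]) -> CacheConfig: ...
    @overload
    def reload(self, name: str) -> Config: ...
    def reload(self, name: str) -> Config:
        # Runtime implementation; the overloads above only guide the checker.
        return CacheConfig() if name == "caches" else Config()
```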
class Config:
root: RootConfig

@@ -33,6 +33,7 @@ class AppServiceConfig(Config):

def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.app_service_config_files = config.get("app_service_config_files", [])
self.notify_appservices = config.get("notify_appservices", True)
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)

def generate_config_section(cls, **kwargs: Any) -> str:
@@ -55,8 +56,7 @@ def load_appservices(
) -> List[ApplicationService]:
"""Returns a list of Application Services from the config files."""
if not isinstance(config_files, list):
# type-ignore: this function gets arbitrary json value; we do use this path.
logger.warning("Expected %s to be a list of AS config files.", config_files)  # type: ignore[unreachable]
logger.warning("Expected %s to be a list of AS config files.", config_files)
return []

# Dicts of value -> filename

@@ -69,11 +69,11 @@ def _canonicalise_cache_name(cache_name: str) -> str:
def add_resizable_cache(
cache_name: str, cache_resize_callback: Callable[[float], None]
) -> None:
"""Register a cache whose size can dynamically change
"""Register a cache that's size can dynamically change

Args:
cache_name: A reference to the cache
cache_resize_callback: A callback function that will run whenever
cache_resize_callback: A callback function that will be ran whenever
the cache needs to be resized
"""
# Some caches have '*' in them which we strip out.
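A sketch of the registration contract described by this docstring, from the cache's side (the cache class is hypothetical and the import path is an assumption; `add_resizable_cache` is the function shown above):

```python
from synapse.config.cache import add_resizable_cache

class SimpleCache:
    def __init__(self, name: str, base_size: int) -> None:
        self.base_size = base_size
        self.max_size = base_size
        # Register so that config (re)loads can resize us via the callback.
        add_resizable_cache(name, self.set_cache_factor)

    def set_cache_factor(self, factor: float) -> None:
        # Invoked whenever cache factors are loaded or reloaded.
        self.max_size = int(self.base_size * factor)
```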
@@ -96,13 +96,6 @@ class CacheConfig(Config):
section = "caches"
_environ = os.environ

event_cache_size: int
cache_factors: Dict[str, float]
global_factor: float
track_memory_usage: bool
expiry_time_msec: Optional[int]
sync_response_cache_duration: int

@staticmethod
def reset() -> None:
"""Resets the caches to their defaults. Used for tests."""
@@ -122,12 +115,6 @@ class CacheConfig(Config):
# A cache 'factor' is a multiplier that can be applied to each of
# Synapse's caches in order to increase or decrease the maximum
# number of entries that can be stored.
#
# The configuration for cache factors (caches.global_factor and
# caches.per_cache_factors) can be reloaded while the application is running,
# by sending a SIGHUP signal to the Synapse process. Changes to other parts of
# the caching config will NOT be applied after a SIGHUP is received; a restart
# is necessary.

# The number of events to cache in memory. Not affected by
# caches.global_factor.
@@ -176,24 +163,6 @@ class CacheConfig(Config):
#
#cache_entry_ttl: 30m

# This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
# `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
# a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
# this option, and all three of the options must be specified for this feature to work.
#cache_autotuning:
# This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted.
# They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
# the flag below, or until the `min_cache_ttl` is hit.
#max_cache_memory_usage: 1024M

# This flag sets a rough target for the desired memory usage of the caches.
#target_cache_memory_usage: 758M

# 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
# caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
# from being emptied while Synapse is evicting due to memory.
#min_cache_ttl: 5m

# Controls how long the results of a /sync request are cached for after
# a successful response is returned. A higher duration can help clients with
# intermittent connections, at the cost of higher memory usage.
@@ -205,21 +174,21 @@ class CacheConfig(Config):
"""

def read_config(self, config: JsonDict, **kwargs: Any) -> None:
"""Populate this config object with values from `config`.

This method does NOT resize existing or future caches: use `resize_all_caches`.
We use two separate methods so that we can reject bad config before applying it.
"""
self.event_cache_size = self.parse_size(
config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
)
self.cache_factors = {}
self.cache_factors: Dict[str, float] = {}

cache_config = config.get("caches") or {}
self.global_factor = cache_config.get("global_factor", _DEFAULT_FACTOR_SIZE)
self.global_factor = cache_config.get(
"global_factor", properties.default_factor_size
)
if not isinstance(self.global_factor, (int, float)):
raise ConfigError("caches.global_factor must be a number.")

# Set the global one so that it's reflected in new caches
properties.default_factor_size = self.global_factor

# Load cache factors from the config
individual_factors = cache_config.get("per_cache_factors") or {}
if not isinstance(individual_factors, dict):
@@ -261,7 +230,7 @@ class CacheConfig(Config):
cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")

if expire_caches:
self.expiry_time_msec = self.parse_duration(cache_entry_ttl)
self.expiry_time_msec: Optional[int] = self.parse_duration(cache_entry_ttl)
else:
self.expiry_time_msec = None

@@ -281,38 +250,23 @@ class CacheConfig(Config):
)
self.expiry_time_msec = self.parse_duration(expiry_time)

self.cache_autotuning = cache_config.get("cache_autotuning")
if self.cache_autotuning:
max_memory_usage = self.cache_autotuning.get("max_cache_memory_usage")
self.cache_autotuning["max_cache_memory_usage"] = self.parse_size(
max_memory_usage
)

target_mem_size = self.cache_autotuning.get("target_cache_memory_usage")
self.cache_autotuning["target_cache_memory_usage"] = self.parse_size(
target_mem_size
)

min_cache_ttl = self.cache_autotuning.get("min_cache_ttl")
self.cache_autotuning["min_cache_ttl"] = self.parse_duration(min_cache_ttl)

self.sync_response_cache_duration = self.parse_duration(
cache_config.get("sync_response_cache_duration", 0)
)

# Resize all caches (if necessary) with the new factors we've loaded
self.resize_all_caches()

# Store this function so that it can be called from other classes without
# needing an instance of Config
properties.resize_all_caches_func = self.resize_all_caches
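To make the parsing above concrete, here is a hypothetical parsed-YAML dict of the kind `read_config` receives; the cache name under `per_cache_factors` is invented, and the sizes/durations are strings that `parse_size`/`parse_duration` accept:

```python
example_config = {
    "event_cache_size": "10K",
    "caches": {
        "global_factor": 1.0,
        "per_cache_factors": {"get_users_in_room": 2.0},
        "expire_caches": True,
        "cache_entry_ttl": "30m",
        # All three sub-options must be present for autotuning to work.
        "cache_autotuning": {
            "max_cache_memory_usage": "1024M",
            "target_cache_memory_usage": "758M",
            "min_cache_ttl": "5m",
        },
        "sync_response_cache_duration": "2m",
    },
}
# read_config(example_config) would populate event_cache_size, cache_factors,
# global_factor, expiry_time_msec, cache_autotuning, etc. from this dict.
```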
def resize_all_caches(self) -> None:
"""Ensure all cache sizes are up-to-date.
"""Ensure all cache sizes are up to date

For each cache, run the mapped callback function with either
a specific cache factor or the default, global one.
"""
# Set the global factor size, so that new caches are appropriately sized.
properties.default_factor_size = self.global_factor

# Store this function so that it can be called from other classes without
# needing an instance of CacheConfig
properties.resize_all_caches_func = self.resize_all_caches

# block other threads from modifying _CACHES while we iterate it.
with _CACHES_LOCK:
for cache_name, callback in _CACHES.items():

@@ -32,7 +32,7 @@ class ExperimentalConfig(Config):
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)

# MSC2285 (private read receipts)
# MSC2285 (hidden read receipts)
self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)

# MSC3244 (room version capabilities)
@@ -81,6 +81,3 @@ class ExperimentalConfig(Config):

# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)

# MSC3786 (Add a default push rule to ignore m.room.server_acl events)
self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)

@@ -46,7 +46,7 @@ class FederationConfig(Config):
)

self.allow_device_name_lookup_over_federation = config.get(
"allow_device_name_lookup_over_federation", False
"allow_device_name_lookup_over_federation", True
)

def generate_config_section(self, **kwargs: Any) -> str:
@@ -81,11 +81,11 @@ class FederationConfig(Config):
#
#allow_profile_lookup_over_federation: false

# Uncomment to allow device display name lookup over federation. By default, the
# Federation API prevents other homeservers from obtaining the display names of
# user devices on this homeserver. Defaults to 'false'.
# Uncomment to disable device display name lookup over federation. By default, the
# Federation API allows other homeservers to obtain device display names of any user
# on this homeserver. Defaults to 'true'.
#
#allow_device_name_lookup_over_federation: true
#allow_device_name_lookup_over_federation: false
"""


@@ -110,6 +110,13 @@ loggers:
# information such as access tokens.
level: INFO

twisted:
# We send the twisted logging directly to the file handler,
# to work around https://github.com/matrix-org/synapse/issues/3471
# when using "buffer" logger. Use "console" to log to stderr instead.
handlers: [file]
propagate: false

root:
level: INFO


@@ -57,9 +57,9 @@ class OembedConfig(Config):
"""
# Whether to use the packaged providers.json file.
if not oembed_config.get("disable_default_providers") or False:
with pkg_resources.resource_stream("synapse", "res/providers.json") as s:
providers = json.load(s)

providers = json.load(
pkg_resources.resource_stream("synapse", "res/providers.json")
)
yield from self._parse_and_validate_provider(
providers, config_path=("oembed",)
)

@@ -43,8 +43,8 @@ class RegistrationConfig(Config):
self.registration_requires_token = config.get(
"registration_requires_token", False
)
self.enable_registration_token_3pid_bypass = config.get(
"enable_registration_token_3pid_bypass", False
self.enable_registration_token_3pid_bypasss = config.get(
"enable_registration_token_3pid_bypasss", False
)
self.registration_shared_secret = config.get("registration_shared_secret")


@@ -63,19 +63,6 @@ class RoomConfig(Config):
"Invalid value for encryption_enabled_by_default_for_room_type"
)

self.default_power_level_content_override = config.get(
"default_power_level_content_override",
None,
)
if self.default_power_level_content_override is not None:
for preset in self.default_power_level_content_override:
if preset not in vars(RoomCreationPreset).values():
raise ConfigError(
"Unrecognised room preset %s in default_power_level_content_override"
% preset
)
# We validate the actual overrides when we try to apply them.
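An illustrative walk-through of the preset check above (the class values mirror Synapse's `RoomCreationPreset` constants; the override dict is made up):

```python
class RoomCreationPreset:
    PRIVATE_CHAT = "private_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
    PUBLIC_CHAT = "public_chat"

override = {
    "private_chat": {"events_default": 1},  # recognised preset: accepted
    "secret_chat": {"events_default": 1},   # unrecognised: rejected below
}
for preset in override:
    if preset not in vars(RoomCreationPreset).values():
        raise ValueError(f"Unrecognised room preset {preset}")
```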
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Rooms ##
@@ -96,38 +83,4 @@ class RoomConfig(Config):
# will also not affect rooms created by other servers.
#
#encryption_enabled_by_default_for_room_type: invite

# Override the default power levels for rooms created on this server, per
# room creation preset.
#
# The appropriate dictionary for the room preset will be applied on top
# of the existing power levels content.
#
# Useful if you know that your users need special permissions in rooms
# that they create (e.g. to send particular types of state events without
# needing an elevated power level). This takes the same shape as the
# `power_level_content_override` parameter in the /createRoom API, but
# is applied before that parameter.
#
# Valid keys are some or all of `private_chat`, `trusted_private_chat`
# and `public_chat`. Inside each of those should be any of the
# properties allowed in `power_level_content_override` in the
# /createRoom API. If any property is missing, its default value will
# continue to be used. If any property is present, it will overwrite
# the existing default completely (so if the `events` property exists,
# the default event power levels will be ignored).
#
#default_power_level_content_override:
# private_chat:
# "events":
# "com.example.myeventtype" : 0
# "m.room.avatar": 50
# "m.room.canonical_alias": 50
# "m.room.encryption": 100
# "m.room.history_visibility": 100
# "m.room.name": 50
# "m.room.power_levels": 100
# "m.room.server_acl": 100
# "m.room.tombstone": 100
# "events_default": 1
"""

@@ -319,6 +319,10 @@ class ServerConfig(Config):
self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router"))

# Whether to update the user directory or not. This should be set to
# false only if we are updating the user directory in a worker
self.update_user_directory = config.get("update_user_directory", True)

# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
@@ -409,7 +413,6 @@ class ServerConfig(Config):
)

self.mau_trial_days = config.get("mau_trial_days", 0)
self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {})
self.mau_limit_alerting = config.get("mau_limit_alerting", True)

# How long to keep redacted events in the database in unredacted form
@@ -996,7 +999,7 @@ class ServerConfig(Config):
# federation: the server-server API (/_matrix/federation). Also implies
# 'media', 'keys', 'openid'
#
# keys: the key discovery API (/_matrix/key).
# keys: the key discovery API (/_matrix/keys).
#
# media: the media API (/_matrix/media).
#
@@ -1102,11 +1105,6 @@ class ServerConfig(Config):
# sign up in a short space of time never to return after their initial
# session.
#
# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
# applies a different trial number if the user was registered by an appservice.
# A value of 0 means no trial days are applied. Appservices not listed in this
# dictionary use the value of `mau_trial_days` instead.
#
# 'mau_limit_alerting' is a means of limiting client side alerting
# should the mau limit be reached. This is useful for small instances
# where the admin has 5 mau seats (say) for 5 specific people and no
@@ -1117,8 +1115,6 @@ class ServerConfig(Config):
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: false
#mau_appservice_trial_days:
# "appservice-id": 1

# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau

@@ -14,8 +14,7 @@
# limitations under the License.

import argparse
import logging
from typing import Any, Dict, List, Union
from typing import Any, List, Union

import attr

@@ -43,13 +42,6 @@ synapse process before they can be run in a separate worker.
Please add ``start_pushers: false`` to the main config
"""

_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
"""

logger = logging.getLogger(__name__)


def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
"""Helper for allowing parsing a string or list of strings to a config
@@ -304,112 +296,6 @@ class WorkerConfig(Config):
self.worker_name is None and background_tasks_instance == "master"
) or self.worker_name == background_tasks_instance

self.should_notify_appservices = self._should_this_worker_perform_duty(
config,
legacy_master_option_name="notify_appservices",
legacy_worker_app_name="synapse.app.appservice",
new_option_name="notify_appservices_from_worker",
)

self.should_update_user_directory = self._should_this_worker_perform_duty(
config,
legacy_master_option_name="update_user_directory",
legacy_worker_app_name="synapse.app.user_dir",
new_option_name="update_user_directory_from_worker",
)

def _should_this_worker_perform_duty(
self,
config: Dict[str, Any],
legacy_master_option_name: str,
legacy_worker_app_name: str,
new_option_name: str,
) -> bool:
"""
Figures out whether this worker should perform a certain duty.

This function is temporary and is only to deal with the complexity
of allowing old, transitional and new configurations all at once.

Contradictions between the legacy and new part of a transitional configuration
will lead to a ConfigError.

Parameters:
config: The config dictionary
legacy_master_option_name: The name of a legacy option, whose value is boolean,
specifying whether it's the master that should handle a certain duty.
e.g. "notify_appservices"
legacy_worker_app_name: The name of a legacy Synapse worker application
that would traditionally perform this duty.
e.g. "synapse.app.appservice"
new_option_name: The name of the new option, whose value is the name of a
designated worker to perform the duty.
e.g. "notify_appservices_from_worker"
"""

# None means 'unspecified'; True means 'run here' and False means
# 'don't run here'.
new_option_should_run_here = None
if new_option_name in config:
designated_worker = config[new_option_name] or "master"
new_option_should_run_here = (
designated_worker == "master" and self.worker_name is None
) or designated_worker == self.worker_name

legacy_option_should_run_here = None
if legacy_master_option_name in config:
run_on_master = bool(config[legacy_master_option_name])

legacy_option_should_run_here = (
self.worker_name is None and run_on_master
) or (self.worker_app == legacy_worker_app_name and not run_on_master)

# Suggest using the new option instead.
logger.warning(
_DEPRECATED_WORKER_DUTY_OPTION_USED,
legacy_master_option_name,
new_option_name,
)

if self.worker_app == legacy_worker_app_name and config.get(
legacy_master_option_name, True
):
# As an extra bit of complication, we need to check that the
# specialised worker is only used if the legacy config says the
# master isn't performing the duties.
raise ConfigError(
f"Cannot use deprecated worker app type '{legacy_worker_app_name}' whilst deprecated option '{legacy_master_option_name}' is not set to false.\n"
f"Consider setting `worker_app: synapse.app.generic_worker` and using the '{new_option_name}' option instead.\n"
f"The '{new_option_name}' option replaces '{legacy_master_option_name}'."
)

if new_option_should_run_here is None and legacy_option_should_run_here is None:
# Neither option specified; the fallback behaviour is to run on the main process
return self.worker_name is None

if (
new_option_should_run_here is not None
and legacy_option_should_run_here is not None
):
# Both options specified; ensure they match!
if new_option_should_run_here != legacy_option_should_run_here:
update_worker_type = (
" and set worker_app: synapse.app.generic_worker"
if self.worker_app == legacy_worker_app_name
else ""
)
# If the values conflict, we suggest the admin removes the legacy option
# for simplicity.
raise ConfigError(
f"Conflicting configuration options: {legacy_master_option_name} (legacy), {new_option_name} (new).\n"
f"Suggestion: remove {legacy_master_option_name}{update_worker_type}.\n"
)

# We've already validated that these aren't conflicting; now just see if
# either is True.
# (By this point, these are either the same value or only one is not None.)
return bool(new_option_should_run_here or legacy_option_should_run_here)
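To summarise the three configuration shapes this helper reconciles, here are hypothetical parsed-config dicts (assume the worker's `worker_name` is `"worker1"`):

```python
# New style: a named worker is designated for the duty.
new_style = {"notify_appservices_from_worker": "worker1"}

# Legacy style: a boolean saying whether the *main* process performs it.
legacy_style = {"notify_appservices": False}

# Transitional: both present. They must agree on where the duty runs,
# otherwise _should_this_worker_perform_duty raises a ConfigError.
transitional = {
    "notify_appservices": False,
    "notify_appservices_from_worker": "worker1",
}
```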
def generate_config_section(self, **kwargs: Any) -> str:
return """\
## Workers ##

@@ -414,12 +414,7 @@ def _is_membership_change_allowed(
raise AuthError(403, "You are banned from this room")
elif join_rule == JoinRules.PUBLIC:
pass
elif (
room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
) or (
room_version.msc3787_knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
):
elif room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED:
# This is the same as public, but the event must contain a reference
# to the server who authorised the join. If the event does not contain
# the proper content it is rejected.
@@ -445,13 +440,8 @@ def _is_membership_change_allowed(
if authorising_user_level < invite_level:
raise AuthError(403, "Join event authorised by invalid server.")

elif (
join_rule == JoinRules.INVITE
or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
or (
room_version.msc3787_knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
elif join_rule == JoinRules.INVITE or (
room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
):
if not caller_in_room and not caller_invited:
raise AuthError(403, "You are not invited to this room.")
@@ -472,10 +462,7 @@ def _is_membership_change_allowed(
if user_level < ban_level or user_level <= target_level:
raise AuthError(403, "You don't have permission to ban")
elif room_version.msc2403_knocking and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
not room_version.msc3787_knock_restricted_join_rule
or join_rule != JoinRules.KNOCK_RESTRICTED
):
if join_rule != JoinRules.KNOCK:
raise AuthError(403, "You don't have permission to knock")
elif target_user_id != event.user_id:
raise AuthError(403, "You cannot knock for other users")
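As a condensed reading of the three hunks above: one side folds the msc3787 `knock_restricted` join rule into the existing branches, so that it behaves like `restricted` for joins and like `knock` for knocks. Illustratively:

```python
# Illustrative only; JoinRules values written as plain strings.
TREATED_AS_RESTRICTED_FOR_JOINS = {"restricted", "knock_restricted"}
RULES_THAT_PERMIT_KNOCKING = {"knock", "knock_restricted"}
```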
@@ -15,7 +15,6 @@
# limitations under the License.

import abc
import collections.abc
import os
from typing import (
TYPE_CHECKING,
@@ -33,11 +32,9 @@ from typing import (
overload,
)

import attr
from typing_extensions import Literal
from unpaddedbase64 import encode_base64

from synapse.api.constants import RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.types import JsonDict, RoomStreamToken
from synapse.util.caches import intern_dict
@@ -216,17 +213,10 @@ class _EventInternalMetadata:
return self.outlier

def is_out_of_band_membership(self) -> bool:
"""Whether this event is an out-of-band membership.

OOB memberships are a special case of outlier events: they are membership events
for federated rooms that we aren't full members of. Examples include invites
received over federation, and rejections for such invites.

The concept of an OOB membership is needed because these events need to be
processed as if they're new regular events (e.g. updating membership state in
the database, relaying to clients via /sync, etc) despite being outliers.

See also https://matrix-org.github.io/synapse/develop/development/room-dag-concepts.html#out-of-band-membership-events.
"""Whether this is an out of band membership, like an invite or an invite
rejection. This is needed as those events are marked as outliers, but
they still need to be processed as if they're new events (e.g. updating
invite state in the database, relaying to clients, etc).

(Added in synapse 0.99.0, so may be unreliable for events received before that)
"""
@@ -618,45 +608,3 @@ def make_event_from_dict(
return event_type(
event_dict, room_version, internal_metadata_dict or {}, rejected_reason
)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _EventRelation:
# The target event of the relation.
parent_id: str
# The relation type.
rel_type: str
# The aggregation key. Will be None if the rel_type is not m.annotation or is
# not a string.
aggregation_key: Optional[str]


def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
"""
Attempt to parse relation information an event.

Returns:
The event relation information, if it is valid. None, otherwise.
"""
relation = event.content.get("m.relates_to")
if not relation or not isinstance(relation, collections.abc.Mapping):
# No relation information.
return None

# Relations must have a type and parent event ID.
rel_type = relation.get("rel_type")
if not isinstance(rel_type, str):
return None

parent_id = relation.get("event_id")
if not isinstance(parent_id, str):
return None

# Annotations have a key field.
aggregation_key = None
if rel_type == RelationTypes.ANNOTATION:
aggregation_key = relation.get("key")
if not isinstance(aggregation_key, str):
aggregation_key = None

return _EventRelation(parent_id, rel_type, aggregation_key)
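For reference, a hypothetical `m.relates_to` payload of the shape this parser accepts, and the value it would produce (the event ID is invented):

```python
content = {
    "m.relates_to": {
        "rel_type": "m.annotation",
        "event_id": "$parent_event_id",
        "key": "👍",
    }
}
# For an event with this content, relation_from_event would return
# _EventRelation(parent_id="$parent_event_id", rel_type="m.annotation",
#                aggregation_key="👍")
```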
@@ -22,16 +22,11 @@ from typing import (
List,
Optional,
Set,
TypeVar,
Union,
)

from typing_extensions import ParamSpec

from twisted.internet.defer import CancelledError

from synapse.api.presence import UserPresenceState
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -45,10 +40,6 @@ GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
logger = logging.getLogger(__name__)


P = ParamSpec("P")
R = TypeVar("R")


def load_legacy_presence_router(hs: "HomeServer") -> None:
"""Wrapper that loads a presence router module configured using the old
configuration, and registers the hooks they implement.
@@ -72,15 +63,13 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:

# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(
f: Optional[Callable[P, R]]
) -> Optional[Callable[P, Awaitable[R]]]:
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None

def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
@@ -91,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:
return run

# Register the hooks through the module API.
hooks: Dict[str, Optional[Callable[..., Any]]] = {
hooks = {
hook: async_wrapper(getattr(presence_router, hook, None))
for hook in presence_router_methods
}
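The `ParamSpec`-based signature above preserves the wrapped callback's parameter types, where the `Callable[..., Awaitable]` version loses them. A self-contained sketch of the same wrapping trick:

```python
from typing import Awaitable, Callable, TypeVar

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def make_async(f: Callable[P, R]) -> Callable[P, Awaitable[R]]:
    # The wrapper keeps f's exact argument types, so mypy can still check
    # call sites of the wrapped function.
    async def run(*args: P.args, **kwargs: P.kwargs) -> R:
        return f(*args, **kwargs)

    return run
```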
@@ -158,11 +147,7 @@ class PresenceRouter:
# run all the callbacks for get_users_for_states and combine the results
for callback in self._get_users_for_states_callbacks:
try:
# Note: result is an object here, because we don't trust modules to
# return the types they're supposed to.
result: object = await delay_cancellation(callback(state_updates))
except CancelledError:
raise
result = await callback(state_updates)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
@@ -214,9 +199,7 @@ class PresenceRouter:
# run all the callbacks for get_interested_users and combine the results
for callback in self._get_interested_users_callbacks:
try:
result = await delay_cancellation(callback(user_id))
except CancelledError:
raise
result = await callback(user_id)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue

@@ -15,16 +15,17 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import attr
from frozendict import frozendict
from typing_extensions import Literal

from twisted.internet.defer import Deferred

from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import JsonDict, StateMap

if TYPE_CHECKING:
from synapse.storage import Storage
from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter


@attr.s(slots=True, auto_attribs=True)
@@ -59,9 +60,6 @@ class EventContext:
If ``state_group`` is None (ie, the event is an outlier),
``state_group_before_event`` will always also be ``None``.

state_delta_due_to_event: If `state_group` and `state_group_before_event` are not None
then this is the delta of the state between the two groups.

prev_group: If it is known, ``state_group``'s prev_group. Note that this being
None does not necessarily mean that ``state_group`` does not have
a prev_group!
@@ -80,47 +78,73 @@ class EventContext:
app_service: If this event is being sent by a (local) application service, that
app service.

_current_state_ids: The room state map, including this event - ie, the state
in ``state_group``.

(type, state_key) -> event_id

For an outlier, this is {}

Note that this is a private attribute: it should be accessed via
``get_current_state_ids``. _AsyncEventContext impl calculates this
on-demand: it will be None until that happens.

_prev_state_ids: The room state map, excluding this event - ie, the state
in ``state_group_before_event``. For a non-state
event, this will be the same as _current_state_events.

Note that it is a completely different thing to prev_group!

(type, state_key) -> event_id

For an outlier, this is {}

As with _current_state_ids, this is a private attribute. It should be
accessed via get_prev_state_ids.

partial_state: if True, we may be storing this event with a temporary,
incomplete state.
"""

_storage: "Storage"
rejected: Union[Literal[False], str] = False
rejected: Union[bool, str] = False
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
_state_delta_due_to_event: Optional[StateMap[str]] = None
prev_group: Optional[int] = None
delta_ids: Optional[StateMap[str]] = None
app_service: Optional[ApplicationService] = None

_current_state_ids: Optional[StateMap[str]] = None
_prev_state_ids: Optional[StateMap[str]] = None

partial_state: bool = False

@staticmethod
def with_state(
storage: "Storage",
state_group: Optional[int],
state_group_before_event: Optional[int],
state_delta_due_to_event: Optional[StateMap[str]],
current_state_ids: Optional[StateMap[str]],
prev_state_ids: Optional[StateMap[str]],
partial_state: bool,
prev_group: Optional[int] = None,
delta_ids: Optional[StateMap[str]] = None,
) -> "EventContext":
return EventContext(
storage=storage,
current_state_ids=current_state_ids,
prev_state_ids=prev_state_ids,
state_group=state_group,
state_group_before_event=state_group_before_event,
state_delta_due_to_event=state_delta_due_to_event,
prev_group=prev_group,
delta_ids=delta_ids,
partial_state=partial_state,
)

@staticmethod
def for_outlier(
storage: "Storage",
) -> "EventContext":
def for_outlier() -> "EventContext":
"""Return an EventContext instance suitable for persisting an outlier event"""
return EventContext(storage=storage)
return EventContext(
current_state_ids={},
prev_state_ids={},
)

async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
"""Converts self to a type that can be serialized as JSON, and then
@@ -133,14 +157,24 @@ class EventContext:
The serialized event.
"""

# We don't serialize the full state dicts, instead they get pulled out
# of the DB on the other side. However, the other side can't figure out
# the prev_state_ids, so if we're a state event we include the event
# id that we replaced in the state.
if event.is_state():
prev_state_ids = await self.get_prev_state_ids()
prev_state_id = prev_state_ids.get((event.type, event.state_key))
else:
prev_state_id = None

return {
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.get_state_key(),
"state_group": self._state_group,
"state_group_before_event": self.state_group_before_event,
"rejected": self.rejected,
"prev_group": self.prev_group,
"state_delta_due_to_event": _encode_state_dict(
self._state_delta_due_to_event
),
"delta_ids": _encode_state_dict(self.delta_ids),
"app_service_id": self.app_service.id if self.app_service else None,
"partial_state": self.partial_state,
@@ -158,16 +192,16 @@ class EventContext:
Returns:
The event context.
"""
context = EventContext(
context = _AsyncEventContextImpl(
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
storage=storage,
prev_state_id=input["prev_state_id"],
event_type=input["event_type"],
event_state_key=input["event_state_key"],
state_group=input["state_group"],
state_group_before_event=input["state_group_before_event"],
prev_group=input["prev_group"],
state_delta_due_to_event=_decode_state_dict(
input["state_delta_due_to_event"]
),
delta_ids=_decode_state_dict(input["delta_ids"]),
rejected=input["rejected"],
partial_state=input.get("partial_state", False),
@@ -197,9 +231,7 @@ class EventContext:

return self._state_group

async def get_current_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> Optional[StateMap[str]]:
async def get_current_state_ids(self) -> Optional[StateMap[str]]:
"""
Gets the room state map, including this event - ie, the state in ``state_group``

@@ -207,9 +239,6 @@ class EventContext:
not make it into the room state. This method will raise an exception if
``rejected`` is set.

Arg:
state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules

Returns:
Returns None if state_group is None, which happens when the associated
event is an outlier.
@@ -220,27 +249,15 @@ class EventContext:
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")

assert self._state_delta_due_to_event is not None
await self._ensure_fetched()
return self._current_state_ids

prev_state_ids = await self.get_prev_state_ids(state_filter)

if self._state_delta_due_to_event:
prev_state_ids = dict(prev_state_ids)
prev_state_ids.update(self._state_delta_due_to_event)

return prev_state_ids
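A toy illustration (event IDs invented) of the derivation above: on the side that drops the lazy-fetching implementation, the current state map is just the previous state with the event's own delta applied on top:

```python
prev_state_ids = {
    ("m.room.name", ""): "$old_name_event",
    ("m.room.member", "@alice:example.com"): "$alice_join",
}
state_delta_due_to_event = {("m.room.name", ""): "$new_name_event"}

current_state_ids = dict(prev_state_ids)
current_state_ids.update(state_delta_due_to_event)
# -> the member entry is untouched; the room name now points at the new event.
```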
async def get_prev_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:
async def get_prev_state_ids(self) -> StateMap[str]:
"""
Gets the room state map, excluding this event.

For a non-state event, this will be the same as get_current_state_ids().

Args:
state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules

Returns:
Returns {} if state_group is None, which happens when the associated
event is an outlier.
@@ -248,10 +265,94 @@ class EventContext:
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
assert self.state_group_before_event is not None
return await self._storage.state.get_state_ids_for_group(
self.state_group_before_event, state_filter
await self._ensure_fetched()
# There *should* be previous state IDs now.
assert self._prev_state_ids is not None
return self._prev_state_ids

def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
"""Gets the current state IDs if we have them already cached.

It is an error to access this for a rejected event, since rejected state should
not make it into the room state. This method will raise an exception if
``rejected`` is set.

Returns:
Returns None if we haven't cached the state or if state_group is None
(which happens when the associated event is an outlier).

Otherwise, returns the the current state IDs.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")

return self._current_state_ids

async def _ensure_fetched(self) -> None:
return None


@attr.s(slots=True)
class _AsyncEventContextImpl(EventContext):
"""
An implementation of EventContext which fetches _current_state_ids and
_prev_state_ids from the database on demand.

Attributes:

_storage

_fetching_state_deferred: Resolves when *_state_ids have been calculated.
None if we haven't started calculating yet

_event_type: The type of the event the context is associated with.

_event_state_key: The state_key of the event the context is associated with.

_prev_state_id: If the event associated with the context is a state event,
then `_prev_state_id` is the event_id of the state that was replaced.
"""

# This needs to have a default as we're inheriting
_storage: "Storage" = attr.ib(default=None)
_prev_state_id: Optional[str] = attr.ib(default=None)
_event_type: str = attr.ib(default=None)
_event_state_key: Optional[str] = attr.ib(default=None)
_fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)

async def _ensure_fetched(self) -> None:
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(self._fill_out_state)

await make_deferred_yieldable(self._fetching_state_deferred)

async def _fill_out_state(self) -> None:
"""Called to populate the _current_state_ids and _prev_state_ids
attributes by loading from the database.
"""
if self.state_group is None:
# No state group means the event is an outlier. Usually the state_ids dicts are also
# pre-set to empty dicts, but they get reset when the context is serialized, so set
# them to empty dicts again here.
self._current_state_ids = {}
self._prev_state_ids = {}
return

current_state_ids = await self._storage.state.get_state_ids_for_group(
self.state_group
)
# Set this separately so mypy knows current_state_ids is not None.
self._current_state_ids = current_state_ids
if self._event_state_key is not None:
self._prev_state_ids = dict(current_state_ids)

key = (self._event_type, self._event_state_key)
if self._prev_state_id:
self._prev_state_ids[key] = self._prev_state_id
else:
self._prev_state_ids.pop(key, None)
else:
self._prev_state_ids = current_state_ids


def _encode_state_dict(

@@ -27,13 +27,11 @@ from typing import (
Union,
)

from synapse.api.errors import Codes
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import Allow, Decision, RegistrationBehaviour
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.metrics import Measure
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
import synapse.events
@@ -41,21 +39,7 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)


CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[
Union[
Allow,
Codes,
# Deprecated
bool,
# Deprecated
str,
]
],
]
SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[Union[bool, str]],
]
@@ -178,14 +162,8 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:


class SpamChecker:
def __init__(self, hs: "synapse.server.HomeServer") -> None:
self.hs = hs
self.clock = hs.get_clock()

def __init__(self) -> None:
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
self._should_drop_federated_event_callbacks: List[
SHOULD_DROP_FEDERATED_EVENT_CALLBACK
] = []
self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
self._user_may_send_3pid_invite_callbacks: List[
@@ -209,9 +187,6 @@ class SpamChecker:
def register_callbacks(
self,
check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
should_drop_federated_event: Optional[
SHOULD_DROP_FEDERATED_EVENT_CALLBACK
] = None,
user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
@@ -230,11 +205,6 @@ class SpamChecker:
if check_event_for_spam is not None:
self._check_event_for_spam_callbacks.append(check_event_for_spam)

if should_drop_federated_event is not None:
self._should_drop_federated_event_callbacks.append(
should_drop_federated_event
)

if user_may_join_room is not None:
self._user_may_join_room_callbacks.append(user_may_join_room)

@@ -270,7 +240,7 @@ class SpamChecker:

async def check_event_for_spam(
self, event: "synapse.events.EventBase"
) -> Union[Decision, str]:
) -> Union[bool, str]:
"""Checks if a given event is considered "spammy" by this server.

If the server considers an event spammy, then it will be rejected if
@@ -281,57 +251,11 @@ class SpamChecker:
event: the event to be checked

Returns:
- on `ALLOW`, the event is considered good (non-spammy) and should
be let through. Other spamcheck filters may still reject it.
- on `Code`, the event is considered spammy and is rejected with a specific
error message/code.
- on `str`, the event is considered spammy and the string is used as error
message. This usage is generally discouraged as it doesn't support
internationalization.
True or a string if the event is spammy. If a string is returned it
will be used as the error message returned to the user.
"""
for callback in self._check_event_for_spam_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
res: Union[Decision, str, bool] = await delay_cancellation(
callback(event)
)
if res is False or res is Allow.ALLOW:
# This spam-checker accepts the event.
# Other spam-checkers may reject it, though.
continue
elif res is True:
# This spam-checker rejects the event with deprecated
# return value `True`
return Codes.FORBIDDEN
else:
# This spam-checker rejects the event either with a `str`
# or with a `Codes`. In either case, we stop here.
return res

# No spam-checker has rejected the event, let it pass.
return Allow.ALLOW
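A hypothetical module callback matching the newer `CHECK_EVENT_FOR_SPAM_CALLBACK` contract shown in this diff: return `Allow.ALLOW` to accept, or a `Codes` value to reject (the keyword heuristic is invented):

```python
from typing import Union

import synapse.events
from synapse.api.errors import Codes
from synapse.spam_checker_api import Allow, Decision

async def my_check_event_for_spam(
    event: "synapse.events.EventBase",
) -> Union[Decision, str]:
    body = event.content.get("body", "")
    if "buy cheap" in body:  # invented heuristic
        return Codes.FORBIDDEN
    return Allow.ALLOW
```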
async def should_drop_federated_event(
self, event: "synapse.events.EventBase"
) -> Union[bool, str]:
"""Checks if a given federated event is considered "spammy" by this
server.

If the server considers an event spammy, it will be silently dropped,
and in doing so will split-brain our view of the room's DAG.

Args:
event: the event to be checked

Returns:
True if the event should be silently dropped
"""
for callback in self._should_drop_federated_event_callbacks:
with Measure(
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
):
res: Union[bool, str] = await delay_cancellation(callback(event))
res: Union[bool, str] = await callback(event)
if res:
return res
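On the `delay_cancellation` wrapper that appears throughout one side of this diff: a rough sketch of the intended semantics, under the stated assumption that it shields the wrapped awaitable from cancellation of the outer request:

```python
from synapse.util.async_helpers import delay_cancellation

async def run_module_callback(callback, event):
    # If the coroutine awaiting this is cancelled, the callback itself is
    # still allowed to run to completion rather than being interrupted
    # mid-flight; the cancellation is re-raised afterwards.
    return await delay_cancellation(callback(event))
```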
@@ -352,13 +276,7 @@ class SpamChecker:
|
||||
Whether the user may join the room
|
||||
"""
|
||||
for callback in self._user_may_join_room_callbacks:
|
||||
with Measure(
|
||||
self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
|
||||
):
|
||||
may_join_room = await delay_cancellation(
|
||||
callback(user_id, room_id, is_invited)
|
||||
)
|
||||
if may_join_room is False:
|
||||
if await callback(user_id, room_id, is_invited) is False:
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -379,13 +297,7 @@ class SpamChecker:
            True if the user may send an invite, otherwise False
        """
        for callback in self._user_may_invite_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_invite = await delay_cancellation(
                    callback(inviter_userid, invitee_userid, room_id)
                )
            if may_invite is False:
            if await callback(inviter_userid, invitee_userid, room_id) is False:
                return False

        return True
@@ -410,13 +322,7 @@ class SpamChecker:
            True if the user may send the invite, otherwise False
        """
        for callback in self._user_may_send_3pid_invite_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_send_3pid_invite = await delay_cancellation(
                    callback(inviter_userid, medium, address, room_id)
                )
            if may_send_3pid_invite is False:
            if await callback(inviter_userid, medium, address, room_id) is False:
                return False

        return True
@@ -433,11 +339,7 @@ class SpamChecker:
            True if the user may create a room, otherwise False
        """
        for callback in self._user_may_create_room_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_create_room = await delay_cancellation(callback(userid))
            if may_create_room is False:
            if await callback(userid) is False:
                return False

        return True
@@ -457,13 +359,7 @@ class SpamChecker:
            True if the user may create a room alias, otherwise False
        """
        for callback in self._user_may_create_room_alias_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_create_room_alias = await delay_cancellation(
                    callback(userid, room_alias)
                )
            if may_create_room_alias is False:
            if await callback(userid, room_alias) is False:
                return False

        return True
@@ -481,11 +377,7 @@ class SpamChecker:
            True if the user may publish the room, otherwise False
        """
        for callback in self._user_may_publish_room_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_publish_room = await delay_cancellation(callback(userid, room_id))
            if may_publish_room is False:
            if await callback(userid, room_id) is False:
                return False

        return True
@@ -506,13 +398,9 @@ class SpamChecker:
            True if the user is spammy.
        """
        for callback in self._check_username_for_spam_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                # Make a copy of the user profile object to ensure the spam checker cannot
                # modify it.
                res = await delay_cancellation(callback(user_profile.copy()))
                if res:
                # Make a copy of the user profile object to ensure the spam checker cannot
                # modify it.
                if await callback(user_profile.copy()):
                    return True

        return False
@@ -540,12 +428,9 @@ class SpamChecker:
        """

        for callback in self._check_registration_for_spam_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                behaviour = await delay_cancellation(
                    callback(email_threepid, username, request_info, auth_provider_id)
                )
                behaviour = await (
                    callback(email_threepid, username, request_info, auth_provider_id)
                )
            assert isinstance(behaviour, RegistrationBehaviour)
            if behaviour != RegistrationBehaviour.ALLOW:
                return behaviour
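
# [Editor's sketch -- not part of the diff] `check_registration_for_spam`
# callbacks return a RegistrationBehaviour rather than a bool, which is why
# the loop above asserts on the type. A hypothetical module (the threepid
# dict shape with an "address" key is an assumption) might look like:
from synapse.spam_checker_api import RegistrationBehaviour

async def check_registration_for_spam(
    email_threepid, username, request_info, auth_provider_id=None
):
    if username and "admin" in username:
        return RegistrationBehaviour.DENY  # refuse the registration outright
    if email_threepid and email_threepid["address"].endswith("@throwaway.example"):
        return RegistrationBehaviour.SHADOW_BAN
    return RegistrationBehaviour.ALLOW
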
@@ -587,10 +472,7 @@ class SpamChecker:
        """

        for callback in self._check_media_file_for_spam_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                spam = await delay_cancellation(callback(file_wrapper, file_info))
                spam = await callback(file_wrapper, file_info)
            if spam:
                return True


@@ -14,14 +14,12 @@
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

from twisted.internet.defer import CancelledError

from synapse.api.errors import ModuleFailedException, SynapseError
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -265,11 +263,7 @@ class ThirdPartyEventRules:

        for callback in self._check_event_allowed_callbacks:
            try:
                res, replacement_data = await delay_cancellation(
                    callback(event, state_events)
                )
            except CancelledError:
                raise
                res, replacement_data = await callback(event, state_events)
            except SynapseError as e:
                # FIXME: Being able to throw SynapseErrors is relied upon by
                # some modules. PR #10386 accidentally broke this ability.
@@ -339,13 +333,8 @@ class ThirdPartyEventRules:

        for callback in self._check_threepid_can_be_invited_callbacks:
            try:
                threepid_can_be_invited = await delay_cancellation(
                    callback(medium, address, state_events)
                )
                if threepid_can_be_invited is False:
                if await callback(medium, address, state_events) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -372,13 +361,8 @@ class ThirdPartyEventRules:

        for callback in self._check_visibility_can_be_modified_callbacks:
            try:
                visibility_can_be_modified = await delay_cancellation(
                    callback(room_id, state_events, new_visibility)
                )
                if visibility_can_be_modified is False:
                if await callback(room_id, state_events, new_visibility) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -416,11 +400,8 @@ class ThirdPartyEventRules:
        """
        for callback in self._check_can_shutdown_room_callbacks:
            try:
                can_shutdown_room = await delay_cancellation(callback(user_id, room_id))
                if can_shutdown_room is False:
                if await callback(user_id, room_id) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.exception(
                    "Failed to run module API callback %s: %s", callback, e
@@ -441,13 +422,8 @@ class ThirdPartyEventRules:
        """
        for callback in self._check_can_deactivate_user_callbacks:
            try:
                can_deactivate_user = await delay_cancellation(
                    callback(user_id, by_admin)
                )
                if can_deactivate_user is False:
                if await callback(user_id, by_admin) is False:
                    return False
            except CancelledError:
                raise
            except Exception as e:
                logger.exception(
                    "Failed to run module API callback %s: %s", callback, e
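
# [Editor's sketch -- not part of the diff] `check_event_allowed` callbacks
# return an `(allowed, replacement)` pair, which is why the loop above
# unpacks `res, replacement_data`. A hypothetical third-party rules module:
async def check_event_allowed(event, state_events):
    # Veto a custom event type; the second element may carry a replacement
    # event dict, or None to leave the event untouched.
    if event.type == "com.example.forbidden":
        return False, None
    return True, None
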
@@ -22,7 +22,6 @@ from typing import (
    Iterable,
    List,
    Mapping,
    MutableMapping,
    Optional,
    Union,
)
@@ -427,12 +426,13 @@ class EventClientSerializer:

        # Check if there are any bundled aggregations to include with the event.
        if bundle_aggregations:
            if event.event_id in bundle_aggregations:
            event_aggregations = bundle_aggregations.get(event.event_id)
            if event_aggregations:
                self._inject_bundled_aggregations(
                    event,
                    time_now,
                    config,
                    bundle_aggregations,
                    event_aggregations,
                    serialized_event,
                    apply_edits=apply_edits,
                )
@@ -471,7 +471,7 @@ class EventClientSerializer:
        event: EventBase,
        time_now: int,
        config: SerializeEventConfig,
        bundled_aggregations: Dict[str, "BundledAggregations"],
        aggregations: "BundledAggregations",
        serialized_event: JsonDict,
        apply_edits: bool,
    ) -> None:
@@ -481,37 +481,22 @@ class EventClientSerializer:
            event: The event being serialized.
            time_now: The current time in milliseconds
            config: Event serialization config
            bundled_aggregations: Bundled aggregations to be injected.
                A map from event_id to aggregation data. Must contain at least an
                entry for `event`.

                While serializing the bundled aggregations this map may be searched
                again for additional events in a recursive manner.
            aggregations: The bundled aggregation to serialize.
            serialized_event: The serialized event which may be modified.
            apply_edits: Whether the content of the event should be modified to reflect
                any replacement in `aggregations.replace`.
        """

        # We have already checked that aggregations exist for this event.
        event_aggregations = bundled_aggregations[event.event_id]

        # The JSON dictionary to be added under the unsigned property of the event
        # being serialized.
        serialized_aggregations = {}

        if event_aggregations.annotations:
            serialized_aggregations[
                RelationTypes.ANNOTATION
            ] = event_aggregations.annotations
        if aggregations.annotations:
            serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations

        if event_aggregations.references:
            serialized_aggregations[
                RelationTypes.REFERENCE
            ] = event_aggregations.references
        if aggregations.references:
            serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references

        if event_aggregations.replace:
        if aggregations.replace:
            # If there is an edit, optionally apply it to the event.
            edit = event_aggregations.replace
            edit = aggregations.replace
            if apply_edits:
                self._apply_edit(event, serialized_event, edit)

@@ -522,16 +507,19 @@ class EventClientSerializer:
                    "sender": edit.sender,
                }

        # Include any threaded replies to this event.
        if event_aggregations.thread:
            thread = event_aggregations.thread
        # If this event is the start of a thread, include a summary of the replies.
        if aggregations.thread:
            thread = aggregations.thread

            serialized_latest_event = self.serialize_event(
                thread.latest_event,
                time_now,
                config=config,
                bundle_aggregations=bundled_aggregations,
            # Don't bundle aggregations as this could recurse forever.
            serialized_latest_event = serialize_event(
                thread.latest_event, time_now, config=config
            )
            # Manually apply an edit, if one exists.
            if thread.latest_edit:
                self._apply_edit(
                    thread.latest_event, serialized_latest_event, thread.latest_edit
                )

            thread_summary = {
                "latest_event": serialized_latest_event,
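
# [Editor's sketch -- not part of the diff] What the code above ultimately
# produces: an "m.relations" block under the serialized event's `unsigned`
# key. Values are illustrative; the keys come from RelationTypes as used in
# the hunk.
example_serialized_event = {
    "type": "m.room.message",
    "unsigned": {
        "m.relations": {
            "m.replace": {  # RelationTypes.REPLACE
                "event_id": "$edit_event_id",
                "origin_server_ts": 1654000000000,
                "sender": "@alice:example.org",
            },
            "m.thread": {  # RelationTypes.THREAD
                "latest_event": {"type": "m.room.message"},  # serialized reply
            },
        },
    },
}
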
@@ -581,20 +569,10 @@ class EventClientSerializer:
    ]


_PowerLevel = Union[str, int]


def copy_and_fixup_power_levels_contents(
    old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_power_levels_contents(
    old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
) -> Dict[str, Union[int, Dict[str, int]]]:
    """Copy the content of a power_levels event, unfreezing frozendicts along the way.

    We accept as input power level values which are strings, provided they represent an
    integer, e.g. `"100"` instead of 100. Such strings are converted to integers
    in the returned dictionary (hence "fixup" in the function name).

    Note that future room versions will outlaw such stringy power levels (see
    https://github.com/matrix-org/matrix-spec/issues/853).
    """Copy the content of a power_levels event, unfreezing frozendicts along the way

    Raises:
        TypeError if the input does not look like a valid power levels event content
@@ -603,47 +581,29 @@ def copy_and_fixup_power_levels_contents(
        raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

    power_levels: Dict[str, Union[int, Dict[str, int]]] = {}

    for k, v in old_power_levels.items():

        if isinstance(v, int):
            power_levels[k] = v
            continue

        if isinstance(v, collections.abc.Mapping):
            h: Dict[str, int] = {}
            power_levels[k] = h
            for k1, v1 in v.items():
                _copy_power_level_value_as_integer(v1, h, k1)
                # we should only have one level of nesting
                if not isinstance(v1, int):
                    raise TypeError(
                        "Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
                    )
                h[k1] = v1
            continue

        else:
            _copy_power_level_value_as_integer(v, power_levels, k)
            raise TypeError("Invalid power_levels value for %s: %r" % (k, v))

    return power_levels


def _copy_power_level_value_as_integer(
    old_value: object,
    power_levels: MutableMapping[str, Any],
    key: str,
) -> None:
    """Set `power_levels[key]` to the integer represented by `old_value`.

    :raises TypeError: if `old_value` is not an integer, nor a base-10 string
        representation of an integer.
    """
    if isinstance(old_value, int):
        power_levels[key] = old_value
        return

    if isinstance(old_value, str):
        try:
            parsed_value = int(old_value, base=10)
        except ValueError:
            # Fall through to the final TypeError.
            pass
        else:
            power_levels[key] = parsed_value
            return

    raise TypeError(f"Invalid power_levels value for {key}: {old_value}")
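
# [Editor's sketch -- not part of the diff] Worked example of the "fixup"
# behaviour implemented above (assumes copy_and_fixup_power_levels_contents
# from the left-hand side of this hunk is in scope): base-10 strings are
# coerced to ints, one level of nesting is allowed, anything else raises.
content = {"ban": 50, "kick": "50", "users": {"@alice:example.org": "100"}}
assert copy_and_fixup_power_levels_contents(content) == {
    "ban": 50,
    "kick": 50,
    "users": {"@alice:example.org": 100},
}
try:
    copy_and_fixup_power_levels_contents({"users": {"@a:b": {"too": "deep"}}})
except TypeError:
    pass  # more than one level of nesting is rejected
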


def validate_canonicaljson(value: Any) -> None:
    """
    Ensure that the JSON object is valid according to the rules of canonical JSON.

@@ -15,7 +15,6 @@
import logging
from typing import TYPE_CHECKING

import synapse
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions, RoomVersion
@@ -99,9 +98,9 @@ class FederationBase:
            )
            return redacted_event

        spam_check = await self.spam_checker.check_event_for_spam(pdu)
        result = await self.spam_checker.check_event_for_spam(pdu)

        if spam_check is not synapse.spam_checker_api.Allow.ALLOW:
        if result:
            logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
            # we redact (to save disk space) as well as soft-failing (to stop
            # using the event in prev_events).

@@ -618,7 +618,7 @@ class FederationClient(FederationBase):
            #
            # Dendrite returns a 404 (with a body of "404 page not found");
            # Conduit returns a 404 (with no body); and Synapse returns a 400
            # with M_UNRECOGNIZED.
            # with M_UNRECOGNISED.
            #
            # This needs to be rather specific as some endpoints truly do return 404
            # errors.
@@ -1426,8 +1426,6 @@ class FederationClient(FederationBase):
        room = res.get("room")
        if not isinstance(room, dict):
            raise InvalidResponseError("'room' must be a dict")
        if room.get("room_id") != room_id:
            raise InvalidResponseError("wrong room returned in hierarchy response")

        # Validate children_state of the room.
        children_state = room.pop("children_state", [])

@@ -110,7 +110,6 @@ class FederationServer(FederationBase):

        self.handler = hs.get_federation_handler()
        self.storage = hs.get_storage()
        self._spam_checker = hs.get_spam_checker()
        self._federation_event_handler = hs.get_federation_event_handler()
        self.state = hs.get_state_handler()
        self._event_auth_handler = hs.get_event_auth_handler()
@@ -1020,12 +1019,6 @@ class FederationServer(FederationBase):
        except SynapseError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

        if await self._spam_checker.should_drop_federated_event(pdu):
            logger.warning(
                "Unstaged federated event contains spam, dropping %s", pdu.event_id
            )
            return

        # Add the event to our staging area
        await self.store.insert_received_event_to_staging(origin, pdu)

@@ -1039,41 +1032,6 @@ class FederationServer(FederationBase):
            pdu.room_id, room_version, lock, origin, pdu
        )

    async def _get_next_nonspam_staged_event_for_room(
        self, room_id: str, room_version: RoomVersion
    ) -> Optional[Tuple[str, EventBase]]:
        """Fetch the first non-spam event from staging queue.

        Args:
            room_id: the room to fetch the first non-spam event in.
            room_version: the version of the room.

        Returns:
            The first non-spam event in that room.
        """

        while True:
            # We need to do this check outside the lock to avoid a race between
            # a new event being inserted by another instance and it attempting
            # to acquire the lock.
            next = await self.store.get_next_staged_event_for_room(
                room_id, room_version
            )

            if next is None:
                return None

            origin, event = next

            if await self._spam_checker.should_drop_federated_event(event):
                logger.warning(
                    "Staged federated event contains spam, dropping %s",
                    event.event_id,
                )
                continue

            return next

    @wrap_as_background_process("_process_incoming_pdus_in_room_inner")
    async def _process_incoming_pdus_in_room_inner(
        self,
@@ -1151,10 +1109,12 @@ class FederationServer(FederationBase):
                    (self._clock.time_msec() - received_ts) / 1000
                )

                next = await self._get_next_nonspam_staged_event_for_room(
                # We need to do this check outside the lock to avoid a race between
                # a new event being inserted by another instance and it attempting
                # to acquire the lock.
                next = await self.store.get_next_staged_event_for_room(
                    room_id, room_version
                )

                if not next:
                    break
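
# [Editor's sketch -- not part of the diff] The `should_drop_federated_event`
# calls removed above consult module callbacks like this hypothetical one;
# returning True makes the server silently drop the PDU, with the
# split-brain caveat documented in the spam-checker hunk earlier.
async def should_drop_federated_event(event):
    return event.sender.endswith(":spam.example.com")  # assumed bad server
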
@@ -15,17 +15,7 @@
import abc
import logging
from collections import OrderedDict
from typing import (
    TYPE_CHECKING,
    Collection,
    Dict,
    Hashable,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
)
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple

import attr
from prometheus_client import Counter
@@ -353,16 +343,9 @@ class FederationSender(AbstractFederationSender):
                    last_token, self._last_poked_id, limit=100
                )

                logger.debug(
                    "Handling %i -> %i: %i events to send (current id %i)",
                    last_token,
                    next_token,
                    len(events),
                    self._last_poked_id,
                )
                logger.debug("Handling %s -> %s", last_token, next_token)

                if not events and next_token >= self._last_poked_id:
                    logger.debug("All events processed")
                    break

            async def handle_event(event: EventBase) -> None:
@@ -370,56 +353,12 @@ class FederationSender(AbstractFederationSender):
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.sender)
                if not is_mine and send_on_behalf_of is None:
                    logger.debug("Not sending remote-origin event %s", event)
                    return

                # We also want to not send out-of-band membership events.
                #
                # OOB memberships are used in three (and a half) situations:
                #
                # (1) invite events which we have received over federation. Those
                #     will have a `sender` on a different server, so will be
                #     skipped by the "is_mine" test above anyway.
                #
                # (2) rejections of invites to federated rooms - either remotely
                #     or locally generated. (Such rejections are normally
                #     created via federation, in which case the remote server is
                #     responsible for sending out the rejection. If that fails,
                #     we'll create a leave event locally, but that's only really
                #     for the benefit of the invited user - we don't have enough
                #     information to send it out over federation).
                #
                # (2a) rescinded knocks. These are identical to rejected invites.
                #
                # (3) knock events which we have sent over federation. As with
                #     invite rejections, the remote server should send them out to
                #     the federation.
                #
                # So, in all the above cases, we want to ignore such events.
                #
                # OOB memberships are always(?) outliers anyway, so if we *don't*
                # ignore them, we'll get an exception further down when we try to
                # fetch the membership list for the room.
                #
                # Arguably, we could equivalently ignore all outliers here, since
                # in theory the only way for an outlier with a local `sender` to
                # exist is by being an OOB membership (via one of (2), (2a) or (3)
                # above).
                #
                if event.internal_metadata.is_out_of_band_membership():
                    logger.debug("Not sending OOB membership event %s", event)
                    return

                # Finally, there are some other events that we should not send out
                # until someone asks for them. They are explicitly flagged as such
                # with `proactively_send: False`.
                if not event.internal_metadata.should_proactively_send():
                    logger.debug(
                        "Not sending event with proactively_send=false: %s", event
                    )
                    return

                destinations: Optional[Collection[str]] = None
                destinations: Optional[Set[str]] = None
                if not event.prev_event_ids():
                    # If there are no prev event IDs then the state is empty
                    # and so no remote servers in the room
@@ -454,7 +393,7 @@ class FederationSender(AbstractFederationSender):
                    )
                    return

                sharded_destinations = {
                destinations = {
                    d
                    for d in destinations
                    if self._federation_shard_config.should_handle(
@@ -466,12 +405,12 @@ class FederationSender(AbstractFederationSender):
                # If we are sending the event on behalf of another server
                # then it already has the event and there is no reason to
                # send the event to it.
                sharded_destinations.discard(send_on_behalf_of)
                destinations.discard(send_on_behalf_of)

                logger.debug("Sending %s to %r", event, sharded_destinations)
                logger.debug("Sending %s to %r", event, destinations)

                if sharded_destinations:
                    await self._send_pdu(event, sharded_destinations)
                if destinations:
                    await self._send_pdu(event, destinations)

                now = self.clock.time_msec()
                ts = await self.store.get_received_ts(event.event_id)
@@ -480,10 +419,7 @@ class FederationSender(AbstractFederationSender):
                        "federation_sender"
                    ).observe((now - ts) / 1000)

            async def handle_room_events(events: List[EventBase]) -> None:
                logger.debug(
                    "Handling %i events in room %s", len(events), events[0].room_id
                )
            async def handle_room_events(events: Iterable[EventBase]) -> None:
                with Measure(self.clock, "handle_room_events"):
                    for event in events:
                        await handle_event(event)
@@ -502,7 +438,6 @@ class FederationSender(AbstractFederationSender):
                    )
                )

                logger.debug("Successfully handled up to %i", next_token)
                await self.store.update_federation_out_pos("events", next_token)

                if events:
@@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl

from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
from synapse.http.server import HttpServer, ServletCallback, is_method_cancellable
from synapse.http.server import HttpServer, ServletCallback
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import run_in_background
@@ -169,16 +169,14 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str
    """
    try:
        header_str = header_bytes.decode("utf-8")
        params = re.split(" +", header_str)[1].split(",")
        params = header_str.split(" ")[1].split(",")
        param_dict: Dict[str, str] = {
            k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
            k: v for k, v in [param.split("=", maxsplit=1) for param in params]
        }

        def strip_quotes(value: str) -> str:
            if value.startswith('"'):
                return re.sub(
                    "\\\\(.)", lambda matchobj: matchobj.group(1), value[1:-1]
                )
                return value[1:-1]
            else:
                return value
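
# [Editor's sketch -- not part of the diff] What the two variants above parse
# differently: the newer (left-hand) code splits on *runs* of spaces and
# lower-cases parameter names; the older code assumes single spaces and
# preserves case. Illustrative header (signature value invented):
import re

header_str = 'X-Matrix origin=origin.example.com,key="ed25519:key1",sig="ABCdef"'
params = re.split(" +", header_str)[1].split(",")
param_dict = {
    k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
}
# -> {'origin': 'origin.example.com', 'key': '"ed25519:key1"', 'sig': '"ABCdef"'}
# strip_quotes() above then removes the surrounding quotes (and, in the newer
# code, unescapes backslash escapes inside them).
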
@@ -375,17 +373,6 @@ class BaseFederationServlet:
            if code is None:
                continue

            if is_method_cancellable(code):
                # The wrapper added by `self._wrap` will inherit the cancellable flag,
                # but the wrapper itself does not support cancellation yet.
                # Once resolved, the cancellation tests in
                # `tests/federation/transport/server/test__base.py` can be re-enabled.
                raise Exception(
                    f"{self.__class__.__name__}.on_{method} has been marked as "
                    "cancellable, but federation servlets do not support cancellation "
                    "yet."
                )

            server.register_paths(
                method,
                (pattern,),

@@ -934,7 +934,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
        # Before deleting the group lets kick everyone out of it
        users = await self.store.get_users_in_group(group_id, include_private=True)

        async def _kick_user_from_group(user_id: str) -> None:
        async def _kick_user_from_group(user_id):
            if self.hs.is_mine_id(user_id):
                groups_local = self.hs.get_groups_local_handler()
                assert isinstance(
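
# [Editor's sketch -- not part of the diff] To the best of my understanding,
# the `is_method_cancellable` helper removed from the import above is a
# simple flag check of roughly this shape; shown only to explain what the
# deleted guard was testing (the real helper lives in synapse.http.server).
def is_method_cancellable_sketch(method) -> bool:
    return getattr(method, "cancellable", False)
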
@@ -23,7 +23,7 @@ from synapse.replication.http.account_data import (
    ReplicationUserAccountDataRestServlet,
)
from synapse.streams import EventSource
from synapse.types import JsonDict, StreamKeyType, UserID
from synapse.types import JsonDict, UserID

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -105,7 +105,7 @@ class AccountDataHandler:
            )

            self._notifier.on_new_event(
                StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
                "account_data_key", max_stream_id, users=[user_id]
            )

            await self._notify_modules(user_id, room_id, account_data_type, content)
@@ -141,7 +141,7 @@ class AccountDataHandler:
            )

            self._notifier.on_new_event(
                StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
                "account_data_key", max_stream_id, users=[user_id]
            )

            await self._notify_modules(user_id, None, account_data_type, content)
@@ -176,7 +176,7 @@ class AccountDataHandler:
            )

            self._notifier.on_new_event(
                StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
                "account_data_key", max_stream_id, users=[user_id]
            )
            return max_stream_id
        else:
@@ -201,7 +201,7 @@ class AccountDataHandler:
            )

            self._notifier.on_new_event(
                StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
                "account_data_key", max_stream_id, users=[user_id]
            )
            return max_stream_id
        else:
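
# [Editor's sketch -- not part of the diff] These hunks (and the handler
# hunks below) swap StreamKeyType constants back to bare strings. The diff
# implies a one-to-one mapping, i.e. something of this shape (sketch; the
# real class lives in synapse.types):
class StreamKeyTypeSketch:
    ACCOUNT_DATA = "account_data_key"
    TYPING = "typing_key"
    RECEIPT = "receipt_key"
    PRESENCE = "presence_key"
    TO_DEVICE = "to_device_key"
    DEVICE_LIST = "device_list_key"
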
@@ -23,7 +23,6 @@ from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
from synapse.util.async_helpers import delay_cancellation

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -151,7 +150,7 @@ class AccountValidityHandler:
            Whether the user has expired.
        """
        for callback in self._is_user_expired_callbacks:
            expired = await delay_cancellation(callback(user_id))
            expired = await callback(user_id)
            if expired is not None:
                return expired
@@ -38,7 +38,6 @@ from synapse.types import (
    JsonDict,
    RoomAlias,
    RoomStreamToken,
    StreamKeyType,
    UserID,
)
from synapse.util.async_helpers import Linearizer
@@ -60,7 +59,7 @@ class ApplicationServicesHandler:
        self.scheduler = hs.get_application_service_scheduler()
        self.started_scheduler = False
        self.clock = hs.get_clock()
        self.notify_appservices = hs.config.worker.should_notify_appservices
        self.notify_appservices = hs.config.appservice.notify_appservices
        self.event_sources = hs.get_event_sources()
        self._msc2409_to_device_messages_enabled = (
            hs.config.experimental.msc2409_to_device_messages_enabled
@@ -214,8 +213,8 @@ class ApplicationServicesHandler:
        Args:
            stream_key: The stream the event came from.

                `stream_key` can be StreamKeyType.TYPING, StreamKeyType.RECEIPT, StreamKeyType.PRESENCE,
                StreamKeyType.TO_DEVICE or StreamKeyType.DEVICE_LIST. Any other value for `stream_key`
                `stream_key` can be "typing_key", "receipt_key", "presence_key",
                "to_device_key" or "device_list_key". Any other value for `stream_key`
                will cause this function to return early.

        Ephemeral events will only be pushed to appservices that have opted into
@@ -236,11 +235,11 @@ class ApplicationServicesHandler:
        # Only the following streams are currently supported.
        # FIXME: We should use constants for these values.
        if stream_key not in (
            StreamKeyType.TYPING,
            StreamKeyType.RECEIPT,
            StreamKeyType.PRESENCE,
            StreamKeyType.TO_DEVICE,
            StreamKeyType.DEVICE_LIST,
            "typing_key",
            "receipt_key",
            "presence_key",
            "to_device_key",
            "device_list_key",
        ):
            return

@@ -259,14 +258,14 @@ class ApplicationServicesHandler:

        # Ignore to-device messages if the feature flag is not enabled
        if (
            stream_key == StreamKeyType.TO_DEVICE
            stream_key == "to_device_key"
            and not self._msc2409_to_device_messages_enabled
        ):
            return

        # Ignore device lists if the feature flag is not enabled
        if (
            stream_key == StreamKeyType.DEVICE_LIST
            stream_key == "device_list_key"
            and not self._msc3202_transaction_extensions_enabled
        ):
            return
@@ -284,15 +283,15 @@ class ApplicationServicesHandler:
            if (
                stream_key
                in (
                    StreamKeyType.TYPING,
                    StreamKeyType.RECEIPT,
                    StreamKeyType.PRESENCE,
                    StreamKeyType.TO_DEVICE,
                    "typing_key",
                    "receipt_key",
                    "presence_key",
                    "to_device_key",
                )
                and service.supports_ephemeral
            )
            or (
                stream_key == StreamKeyType.DEVICE_LIST
                stream_key == "device_list_key"
                and service.msc3202_transaction_extensions
            )
        ]
@@ -318,7 +317,7 @@ class ApplicationServicesHandler:
        logger.debug("Checking interested services for %s", stream_key)
        with Measure(self.clock, "notify_interested_services_ephemeral"):
            for service in services:
                if stream_key == StreamKeyType.TYPING:
                if stream_key == "typing_key":
                    # Note that we don't persist the token (via set_appservice_stream_type_pos)
                    # for typing_key due to performance reasons and due to their highly
                    # ephemeral nature.
@@ -334,7 +333,7 @@ class ApplicationServicesHandler:
                async with self._ephemeral_events_linearizer.queue(
                    (service.id, stream_key)
                ):
                    if stream_key == StreamKeyType.RECEIPT:
                    if stream_key == "receipt_key":
                        events = await self._handle_receipts(service, new_token)
                        self.scheduler.enqueue_for_appservice(service, ephemeral=events)

@@ -343,7 +342,7 @@ class ApplicationServicesHandler:
                            service, "read_receipt", new_token
                        )

                    elif stream_key == StreamKeyType.PRESENCE:
                    elif stream_key == "presence_key":
                        events = await self._handle_presence(service, users, new_token)
                        self.scheduler.enqueue_for_appservice(service, ephemeral=events)

@@ -352,7 +351,7 @@ class ApplicationServicesHandler:
                            service, "presence", new_token
                        )

                    elif stream_key == StreamKeyType.TO_DEVICE:
                    elif stream_key == "to_device_key":
                        # Retrieve a list of to-device message events, as well as the
                        # maximum stream token of the messages we were able to retrieve.
                        to_device_messages = await self._get_to_device_messages(
@@ -367,7 +366,7 @@ class ApplicationServicesHandler:
                            service, "to_device", new_token
                        )

                    elif stream_key == StreamKeyType.DEVICE_LIST:
                    elif stream_key == "device_list_key":
                        device_list_summary = await self._get_device_list_summary(
                            service, new_token
                        )

@@ -41,7 +41,6 @@ import pymacaroons
import unpaddedbase64
from pymacaroons.exceptions import MacaroonVerificationFailedException

from twisted.internet.defer import CancelledError
from twisted.web.server import Request

from synapse.api.constants import LoginType
@@ -68,7 +67,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
@@ -552,7 +551,7 @@ class AuthHandler:
        await self.store.set_ui_auth_clientdict(sid, clientdict)

        user_agent = get_request_user_agent(request)
        clientip = request.getClientAddress().host
        clientip = request.getClientIP()

        await self.store.add_user_agent_ip_to_ui_auth_session(
            session.session_id, user_agent, clientip
@@ -2203,11 +2202,7 @@ class PasswordAuthProvider:
        # other than None (i.e. until a callback returns a success)
        for callback in self.auth_checker_callbacks[login_type]:
            try:
                result = await delay_cancellation(
                    callback(username, login_type, login_dict)
                )
            except CancelledError:
                raise
                result = await callback(username, login_type, login_dict)
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)
                continue
@@ -2268,9 +2263,7 @@ class PasswordAuthProvider:

        for callback in self.check_3pid_auth_callbacks:
            try:
                result = await delay_cancellation(callback(medium, address, password))
            except CancelledError:
                raise
                result = await callback(medium, address, password)
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)
                continue
@@ -2352,7 +2345,7 @@ class PasswordAuthProvider:
        """
        for callback in self.get_username_for_registration_callbacks:
            try:
                res = await delay_cancellation(callback(uia_results, params))
                res = await callback(uia_results, params)

                if isinstance(res, str):
                    return res
@@ -2366,8 +2359,6 @@ class PasswordAuthProvider:
                    callback,
                    res,
                )
            except CancelledError:
                raise
            except Exception as e:
                logger.error(
                    "Module raised an exception in get_username_for_registration: %s",
@@ -2397,7 +2388,7 @@ class PasswordAuthProvider:
        """
        for callback in self.get_displayname_for_registration_callbacks:
            try:
                res = await delay_cancellation(callback(uia_results, params))
                res = await callback(uia_results, params)

                if isinstance(res, str):
                    return res
@@ -2411,8 +2402,6 @@ class PasswordAuthProvider:
                    callback,
                    res,
                )
            except CancelledError:
                raise
            except Exception as e:
                logger.error(
                    "Module raised an exception in get_displayname_for_registration: %s",
@@ -2440,7 +2429,7 @@ class PasswordAuthProvider:
        """
        for callback in self.is_3pid_allowed_callbacks:
            try:
                res = await delay_cancellation(callback(medium, address, registration))
                res = await callback(medium, address, registration)

                if res is False:
                    return res
@@ -2454,8 +2443,6 @@ class PasswordAuthProvider:
                    callback,
                    res,
                )
            except CancelledError:
                raise
            except Exception as e:
                logger.error("Module raised an exception in is_3pid_allowed: %s", e)
                raise SynapseError(code=500, msg="Internal Server Error")

@@ -43,7 +43,6 @@ from synapse.metrics.background_process_metrics import (
)
from synapse.types import (
    JsonDict,
    StreamKeyType,
    StreamToken,
    UserID,
    get_domain_from_id,
@@ -503,7 +502,7 @@ class DeviceHandler(DeviceWorkerHandler):
        # specify the user ID too since the user should always get their own device list
        # updates, even if they aren't in any rooms.
        self.notifier.on_new_event(
            StreamKeyType.DEVICE_LIST, position, users={user_id}, rooms=room_ids
            "device_list_key", position, users={user_id}, rooms=room_ids
        )

        # We may need to do some processing asynchronously for local user IDs.
@@ -524,9 +523,7 @@ class DeviceHandler(DeviceWorkerHandler):
            from_user_id, user_ids
        )

        self.notifier.on_new_event(
            StreamKeyType.DEVICE_LIST, position, users=[from_user_id]
        )
        self.notifier.on_new_event("device_list_key", position, users=[from_user_id])

    async def user_left_room(self, user: UserID, room_id: str) -> None:
        user_id = user.to_string()

@@ -26,7 +26,7 @@ from synapse.logging.opentracing import (
    set_tag,
)
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id
from synapse.types import JsonDict, Requester, UserID, get_domain_from_id
from synapse.util import json_encoder
from synapse.util.stringutils import random_string

@@ -151,7 +151,7 @@ class DeviceMessageHandler:
        # Notify listeners that there are new to-device messages to process,
        # handing them the latest stream id.
        self.notifier.on_new_event(
            StreamKeyType.TO_DEVICE, last_stream_id, users=local_messages.keys()
            "to_device_key", last_stream_id, users=local_messages.keys()
        )

    async def _check_for_unknown_devices(
@@ -285,7 +285,7 @@ class DeviceMessageHandler:
        # Notify listeners that there are new to-device messages to process,
        # handing them the latest stream id.
        self.notifier.on_new_event(
            StreamKeyType.TO_DEVICE, last_stream_id, users=local_messages.keys()
            "to_device_key", last_stream_id, users=local_messages.keys()
        )

        if self.federation_sender:

@@ -71,9 +71,6 @@ class DirectoryHandler:
        if wchar in room_alias.localpart:
            raise SynapseError(400, "Invalid characters in room alias")

        if ":" in room_alias.localpart:
            raise SynapseError(400, "Invalid character in room alias localpart: ':'.")

        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room alias must be local")
            # TODO(erikj): Change this.

@@ -15,7 +15,7 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple

import attr
from canonicaljson import encode_canonical_json
@@ -1105,19 +1105,22 @@ class E2eKeysHandler:
            # can request over federation
            raise NotFoundError("No %s key found for %s" % (key_type, user_id))

        cross_signing_keys = await self._retrieve_cross_signing_keys_for_remote_user(
            user, key_type
        )
        if cross_signing_keys is None:
        (
            key,
            key_id,
            verify_key,
        ) = await self._retrieve_cross_signing_keys_for_remote_user(user, key_type)

        if key is None:
            raise NotFoundError("No %s key found for %s" % (key_type, user_id))

        return cross_signing_keys
        return key, key_id, verify_key

    async def _retrieve_cross_signing_keys_for_remote_user(
        self,
        user: UserID,
        desired_key_type: str,
    ) -> Optional[Tuple[Dict[str, Any], str, VerifyKey]]:
    ) -> Tuple[Optional[dict], Optional[str], Optional[VerifyKey]]:
        """Queries cross-signing keys for a remote user and saves them to the database

        Only the key specified by `key_type` will be returned, while all retrieved keys
@@ -1143,10 +1146,12 @@ class E2eKeysHandler:
                type(e),
                e,
            )
            return None
            return None, None, None

        # Process each of the retrieved cross-signing keys
        desired_key_data = None
        desired_key = None
        desired_key_id = None
        desired_verify_key = None
        retrieved_device_ids = []
        for key_type in ["master", "self_signing"]:
            key_content = remote_result.get(key_type + "_key")
@@ -1191,7 +1196,9 @@ class E2eKeysHandler:

            # If this is the desired key type, save it and its ID/VerifyKey
            if key_type == desired_key_type:
                desired_key_data = key_content, key_id, verify_key
                desired_key = key_content
                desired_verify_key = verify_key
                desired_key_id = key_id

            # At the same time, store this key in the db for subsequent queries
            await self.store.set_e2e_cross_signing_key(
@@ -1205,7 +1212,7 @@ class E2eKeysHandler:
            user.to_string(), retrieved_device_ids
        )

        return desired_key_data
        return desired_key, desired_key_id, desired_verify_key


def _check_cross_signing_key(
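
# [Editor's sketch -- not part of the diff] For reference, the `key_content`
# dicts shuffled between the two return conventions above follow the Matrix
# cross-signing key format (illustrative values):
example_master_key = {
    "user_id": "@alice:example.org",
    "usage": ["master"],
    "keys": {"ed25519:base64+master+public+key": "base64+master+public+key"},
}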