
Compare commits


7 Commits

Author         SHA1        Message                                                              Date
Erik Johnston  d76698ef30  Up no output timeout                                                 2021-02-18 14:00:59 +00:00
Erik Johnston  48cc4f8903  Try building lxml up front to avoid time outs                        2021-02-18 12:08:21 +00:00
Erik Johnston  3fe29250c4  Try building cryptography separately to avoid time outs              2021-02-18 10:17:09 +00:00
Erik Johnston  1dd584b46d  Test circleci config                                                 2021-02-17 17:46:29 +00:00
Erik Johnston  6314645c05  Newsfile                                                             2021-02-17 17:45:44 +00:00
Erik Johnston  32b2c4c97f  Update circleci config to use cargo cache                            2021-02-17 17:45:44 +00:00
Erik Johnston  b64dadc497  Add a Dockerfile that allows using a base image with a cargo cache  2021-02-17 15:09:45 +00:00
230 changed files with 2024 additions and 5815 deletions

View File

@@ -14,7 +14,7 @@ jobs:
           platforms: linux/amd64
       - docker_build:
           tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
-          platforms: linux/amd64,linux/arm64
+          platforms: linux/amd64,linux/arm/v7,linux/arm64
   dockerhubuploadlatest:
     docker:
@@ -22,12 +22,12 @@ jobs:
     steps:
       - checkout
       - docker_prepare
-      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      # - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       # for `latest`, we don't want the arm images to disappear, so don't update the tag
       # until all of the platforms are built.
       - docker_build:
-          tag: -t matrixdotorg/synapse:latest
-          platforms: linux/amd64,linux/arm64
+          tag: -t 127.0.0.1:5000/synapse:erikj-test
+          platforms: linux/amd64,linux/arm/v7,linux/arm64
 workflows:
   build:
@@ -41,7 +41,7 @@ workflows:
       - dockerhubuploadlatest:
           filters:
             branches:
-              only: master
+              only: erikj/arm_docker_cache
 commands:
   docker_prepare:
@@ -52,9 +52,9 @@ commands:
         default: "v0.4.1"
     steps:
       - setup_remote_docker:
-          # 19.03.13 was the most recent available on circleci at the time of
+          # 20.10.2 was the most recent available on circleci at the time of
           # writing.
-          version: 19.03.13
+          version: 20.10.2
       - run: apk add --no-cache curl
       - run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache
       - run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
@@ -64,7 +64,10 @@ commands:
       # create a context named `builder` for the builds
       - run: docker context create builder
       # create a buildx builder using the new context, and set it as the default
-      - run: docker buildx create builder --use
+      - run: docker buildx create --driver docker-container --driver-opt network=host builder --use
+      # Start a registry so that we have somewhere to store our temporary docker
+      # images (as multi arch builds don't work with the standard local docker store)
+      - run: docker run -d -p 127.0.0.1:5000:5000 --name registry registry:2
   docker_build:
     description: Builds and pushes images to dockerhub using buildx
@@ -75,4 +78,7 @@ commands:
       tag:
         type: string
     steps:
-      - run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain .
+      - run: docker buildx build -f docker/Dockerfile-cargo-cache --push -t 127.0.0.1:5000/cargo_cache --platform << parameters.platforms >> --progress=plain .
+      - run:
+          command: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --build-arg BASE_IMAGE=127.0.0.1:5000/cargo_cache --build-arg CARGO_NET_OFFLINE=true --progress=plain .
+          no_output_timeout: 30m
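
The new `docker_build` command is two-stage: it first pushes a cargo-cache base image to the job-local registry started in `docker_prepare`, then builds the real image on top of it with cargo kept offline. A rough local equivalent of what the CI job does, assuming buildx is installed (the `:test` tag is illustrative):

```sh
# Throwaway registry to hold the intermediate multi-arch image.
docker run -d -p 127.0.0.1:5000:5000 --name registry registry:2

# Builder backed by the docker-container driver, sharing the host network
# so it can reach the registry on 127.0.0.1.
docker buildx create --driver docker-container --driver-opt network=host --use

# Stage 1: the cargo-cache image.
docker buildx build -f docker/Dockerfile-cargo-cache --push \
    -t 127.0.0.1:5000/cargo_cache \
    --platform linux/amd64,linux/arm/v7,linux/arm64 .

# Stage 2: the Synapse image, built against the cached crates index.
docker buildx build -f docker/Dockerfile --push \
    -t 127.0.0.1:5000/synapse:test \
    --build-arg BASE_IMAGE=127.0.0.1:5000/cargo_cache \
    --build-arg CARGO_NET_OFFLINE=true \
    --platform linux/amd64,linux/arm/v7,linux/arm64 .
```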

View File

@@ -1,8 +0,0 @@
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56
# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1

3
.gitignore vendored
View File

@@ -6,14 +6,13 @@
*.egg
*.egg-info
*.lock
*.py[cod]
*.pyc
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/
# stuff that is likely to exist when you run a server locally
/*.db

View File

@@ -1,255 +1,9 @@
Synapse 1.30.0 (2021-03-22)
===========================
Note that this release deprecates the ability for appservices to
call `POST /_matrix/client/r0/register` without the body parameter `type`. Appservice
developers should use a `type` value of `m.login.application_service` as
per [the spec](https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions).
In future releases, calling this endpoint with an access token - but without a `m.login.application_service`
type - will fail.
No significant changes.
Synapse 1.30.0rc1 (2021-03-16)
==============================
Features
--------
- Add prometheus metrics for number of users successfully registering and logging in. ([\#9510](https://github.com/matrix-org/synapse/issues/9510), [\#9511](https://github.com/matrix-org/synapse/issues/9511), [\#9573](https://github.com/matrix-org/synapse/issues/9573))
- Add `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time` prometheus metrics, which monitor federation delays by reporting the timestamps of messages sent and received to a set of remote servers. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
- Add support for generating JSON Web Tokens dynamically for use as OIDC client secrets. ([\#9549](https://github.com/matrix-org/synapse/issues/9549))
- Optimise handling of incomplete room history for incoming federation. ([\#9601](https://github.com/matrix-org/synapse/issues/9601))
- Finalise support for allowing clients to pick an SSO Identity Provider ([MSC2858](https://github.com/matrix-org/matrix-doc/pull/2858)). ([\#9617](https://github.com/matrix-org/synapse/issues/9617))
- Tell spam checker modules about the SSO IdP a user registered through if one was used. ([\#9626](https://github.com/matrix-org/synapse/issues/9626))
Bugfixes
--------
- Fix long-standing bug when generating thumbnails for some images with transparency: `TypeError: cannot unpack non-iterable int object`. ([\#9473](https://github.com/matrix-org/synapse/issues/9473))
- Purge chain cover indexes for events that were purged prior to Synapse v1.29.0. ([\#9542](https://github.com/matrix-org/synapse/issues/9542), [\#9583](https://github.com/matrix-org/synapse/issues/9583))
- Fix bug where federation requests were not correctly retried on 5xx responses. ([\#9567](https://github.com/matrix-org/synapse/issues/9567))
- Fix re-activating an account via the admin API when local passwords are disabled. ([\#9587](https://github.com/matrix-org/synapse/issues/9587))
- Fix a bug introduced in Synapse 1.20 which caused incoming federation transactions to stack up, causing slow recovery from outages. ([\#9597](https://github.com/matrix-org/synapse/issues/9597))
- Fix a bug introduced in v1.28.0 where the OpenID Connect callback endpoint could error with a `MacaroonInitException`. ([\#9620](https://github.com/matrix-org/synapse/issues/9620))
- Fix Internal Server Error on `GET /_synapse/client/saml2/authn_response` request. ([\#9623](https://github.com/matrix-org/synapse/issues/9623))
Updates to the Docker image
---------------------------
- Make use of an improved malloc implementation (`jemalloc`) in the docker image. ([\#8553](https://github.com/matrix-org/synapse/issues/8553))
Improved Documentation
----------------------
- Add relayd entry to reverse proxy example configurations. ([\#9508](https://github.com/matrix-org/synapse/issues/9508))
- Improve the SAML2 upgrade notes for 1.27.0. ([\#9550](https://github.com/matrix-org/synapse/issues/9550))
- Link to the "List user's media" admin API from the media admin API docs. ([\#9571](https://github.com/matrix-org/synapse/issues/9571))
- Clarify the spam checker modules documentation example to mention that `parse_config` is a required method. ([\#9580](https://github.com/matrix-org/synapse/issues/9580))
- Clarify the sample configuration for `stats` settings. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
Deprecations and Removals
-------------------------
- The `synapse_federation_last_sent_pdu_age` and `synapse_federation_last_received_pdu_age` prometheus metrics have been removed. They are replaced by `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time`. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
- Registering an Application Service user without using the `m.login.application_service` login type will be unsupported in an upcoming Synapse release. ([\#9559](https://github.com/matrix-org/synapse/issues/9559))
Internal Changes
----------------
- Add tests to ResponseCache. ([\#9458](https://github.com/matrix-org/synapse/issues/9458))
- Add type hints to purge room and server notice admin API. ([\#9520](https://github.com/matrix-org/synapse/issues/9520))
- Add extra logging to ObservableDeferred when callbacks throw exceptions. ([\#9523](https://github.com/matrix-org/synapse/issues/9523))
- Fix incorrect type hints. ([\#9528](https://github.com/matrix-org/synapse/issues/9528), [\#9543](https://github.com/matrix-org/synapse/issues/9543), [\#9591](https://github.com/matrix-org/synapse/issues/9591), [\#9608](https://github.com/matrix-org/synapse/issues/9608), [\#9618](https://github.com/matrix-org/synapse/issues/9618))
- Add an additional test for purging a room. ([\#9541](https://github.com/matrix-org/synapse/issues/9541))
- Add a `.git-blame-ignore-revs` file with the hashes of auto-formatting. ([\#9560](https://github.com/matrix-org/synapse/issues/9560))
- Increase the threshold before which outbound federation to a server goes into "catch up" mode, which is expensive for the remote server to handle. ([\#9561](https://github.com/matrix-org/synapse/issues/9561))
- Fix spurious errors reported by the `config-lint.sh` script. ([\#9562](https://github.com/matrix-org/synapse/issues/9562))
- Fix type hints and tests for BlacklistingAgentWrapper and BlacklistingReactorWrapper. ([\#9563](https://github.com/matrix-org/synapse/issues/9563))
- Do not have mypy ignore type hints from unpaddedbase64. ([\#9568](https://github.com/matrix-org/synapse/issues/9568))
- Improve efficiency of calculating the auth chain in large rooms. ([\#9576](https://github.com/matrix-org/synapse/issues/9576))
- Convert `synapse.types.Requester` to an `attrs` class. ([\#9586](https://github.com/matrix-org/synapse/issues/9586))
- Add logging for redis connection setup. ([\#9590](https://github.com/matrix-org/synapse/issues/9590))
- Improve logging when processing incoming transactions. ([\#9596](https://github.com/matrix-org/synapse/issues/9596))
- Remove unused `stats.retention` setting, and emit a warning if stats are disabled. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
- Prevent attempting to bundle aggregations for state events in /context APIs. ([\#9619](https://github.com/matrix-org/synapse/issues/9619))
Synapse 1.29.0 (2021-03-08)
===========================
Note that synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see [UPGRADE.rst](UPGRADE.rst#upgrading-to-v1290) for more details on this change.
No significant changes.
Synapse 1.29.0rc1 (2021-03-04)
==============================
Features
--------
- Add rate limiters to cross-user key sharing requests. ([\#8957](https://github.com/matrix-org/synapse/issues/8957))
- Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel. ([\#8978](https://github.com/matrix-org/synapse/issues/8978))
- Add some configuration settings to make users' profile data more private. ([\#9203](https://github.com/matrix-org/synapse/issues/9203))
- The `no_proxy` and `NO_PROXY` environment variables are now respected in proxied HTTP clients with the lowercase form taking precedence if both are present. Additionally, the lowercase `https_proxy` environment variable is now respected in proxied HTTP clients on top of existing support for the uppercase `HTTPS_PROXY` form and takes precedence if both are present. Contributed by Timothy Leung. ([\#9372](https://github.com/matrix-org/synapse/issues/9372))
- Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users. ([\#9383](https://github.com/matrix-org/synapse/issues/9383), [\#9385](https://github.com/matrix-org/synapse/issues/9385))
- Add support for regenerating thumbnails if they have been deleted but the original image is still stored. ([\#9438](https://github.com/matrix-org/synapse/issues/9438))
- Add support for `X-Forwarded-Proto` header when using a reverse proxy. ([\#9472](https://github.com/matrix-org/synapse/issues/9472), [\#9501](https://github.com/matrix-org/synapse/issues/9501), [\#9512](https://github.com/matrix-org/synapse/issues/9512), [\#9539](https://github.com/matrix-org/synapse/issues/9539))
Bugfixes
--------
- Fix a bug where users' pushers were not all deleted when they deactivated their account. ([\#9285](https://github.com/matrix-org/synapse/issues/9285), [\#9516](https://github.com/matrix-org/synapse/issues/9516))
- Fix a bug where a lot of unnecessary presence updates were sent when joining a room. ([\#9402](https://github.com/matrix-org/synapse/issues/9402))
- Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results. ([\#9416](https://github.com/matrix-org/synapse/issues/9416))
- Fix a bug in single sign-on which could cause a "No session cookie found" error. ([\#9436](https://github.com/matrix-org/synapse/issues/9436))
- Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined. ([\#9440](https://github.com/matrix-org/synapse/issues/9440))
- Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`. ([\#9449](https://github.com/matrix-org/synapse/issues/9449))
- Fix deleting pushers when using sharded pushers. ([\#9465](https://github.com/matrix-org/synapse/issues/9465), [\#9466](https://github.com/matrix-org/synapse/issues/9466), [\#9479](https://github.com/matrix-org/synapse/issues/9479), [\#9536](https://github.com/matrix-org/synapse/issues/9536))
- Fix missing startup checks for the consistency of certain PostgreSQL sequences. ([\#9470](https://github.com/matrix-org/synapse/issues/9470))
- Fix a long-standing bug where the media repository could leak file descriptors while previewing media. ([\#9497](https://github.com/matrix-org/synapse/issues/9497))
- Properly purge the event chain cover index when purging history. ([\#9498](https://github.com/matrix-org/synapse/issues/9498))
- Fix missing chain cover index due to a schema delta not being applied correctly. Only affected servers that ran development versions. ([\#9503](https://github.com/matrix-org/synapse/issues/9503))
- Fix a bug introduced in v1.25.0 where `/_synapse/admin/join/` would fail when given a room alias. ([\#9506](https://github.com/matrix-org/synapse/issues/9506))
- Prevent presence background jobs from running when presence is disabled. ([\#9530](https://github.com/matrix-org/synapse/issues/9530))
- Fix rare edge case that caused a background update to fail if the server had rejected an event that had duplicate auth events. ([\#9537](https://github.com/matrix-org/synapse/issues/9537))
Improved Documentation
----------------------
- Update the example systemd config to propagate reloads to individual units. ([\#9463](https://github.com/matrix-org/synapse/issues/9463))
Internal Changes
----------------
- Add documentation and type hints to `parse_duration`. ([\#9432](https://github.com/matrix-org/synapse/issues/9432))
- Remove vestiges of `uploads_path` configuration setting. ([\#9462](https://github.com/matrix-org/synapse/issues/9462))
- Add a comment about systemd-python. ([\#9464](https://github.com/matrix-org/synapse/issues/9464))
- Test that we require validated email for email pushers. ([\#9496](https://github.com/matrix-org/synapse/issues/9496))
- Allow python to generate bytecode for synapse. ([\#9502](https://github.com/matrix-org/synapse/issues/9502))
- Fix incorrect type hints. ([\#9515](https://github.com/matrix-org/synapse/issues/9515), [\#9518](https://github.com/matrix-org/synapse/issues/9518))
- Add type hints to device and event report admin API. ([\#9519](https://github.com/matrix-org/synapse/issues/9519))
- Add type hints to user admin API. ([\#9521](https://github.com/matrix-org/synapse/issues/9521))
- Bump the versions of mypy and mypy-zope used for static type checking. ([\#9529](https://github.com/matrix-org/synapse/issues/9529))
Synapse 1.28.0 (2021-02-25)
===========================
Note that this release drops support for ARMv7 in the official Docker images, due to repeated problems building for ARMv7 (and the associated maintenance burden this entails).
This release also fixes the documentation included in v1.27.0 around the callback URI for SAML2 identity providers. If your server is configured to use single sign-on via a SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
Internal Changes
----------------
- Revert change in v1.28.0rc1 to remove the deprecated SAML endpoint. ([\#9474](https://github.com/matrix-org/synapse/issues/9474))
Synapse 1.28.0rc1 (2021-02-19)
==============================
Removal warning
---------------
The v1 list accounts API is deprecated and will be removed in a future release.
This API was undocumented and misleading. It can be replaced by the
[v2 list accounts API](https://github.com/matrix-org/synapse/blob/release-v1.28.0/docs/admin_api/user_admin_api.rst#list-accounts),
which has been available since Synapse 1.7.0 (2019-12-13).
Please check if you're using any scripts which use the admin API and replace
`GET /_synapse/admin/v1/users/<user_id>` with `GET /_synapse/admin/v2/users`.
Features
--------
- New admin API to get the context of an event: `/_synapse/admin/rooms/{roomId}/context/{eventId}`. ([\#9150](https://github.com/matrix-org/synapse/issues/9150))
- Further improvements to the user experience of registration via single sign-on. ([\#9300](https://github.com/matrix-org/synapse/issues/9300), [\#9301](https://github.com/matrix-org/synapse/issues/9301))
- Add hook to spam checker modules that allow checking file uploads and remote downloads. ([\#9311](https://github.com/matrix-org/synapse/issues/9311))
- Add support for receiving OpenID Connect authentication responses via form `POST`s rather than `GET`s. ([\#9376](https://github.com/matrix-org/synapse/issues/9376))
- Add the shadow-banning status to the admin API for user info. ([\#9400](https://github.com/matrix-org/synapse/issues/9400))
Bugfixes
--------
- Fix long-standing bug where sending email notifications would fail for rooms that the server had since left. ([\#9257](https://github.com/matrix-org/synapse/issues/9257))
- Fix bug introduced in Synapse 1.27.0rc1 which meant the "session expired" error page during SSO registration was badly formatted. ([\#9296](https://github.com/matrix-org/synapse/issues/9296))
- Assert a maximum length for some parameters for spec compliance. ([\#9321](https://github.com/matrix-org/synapse/issues/9321), [\#9393](https://github.com/matrix-org/synapse/issues/9393))
- Fix additional errors when previewing URLs: "AttributeError 'NoneType' object has no attribute 'xpath'" and "ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.". ([\#9333](https://github.com/matrix-org/synapse/issues/9333))
- Fix a bug causing Synapse to impose the wrong type constraints on fields when processing responses from appservices to `/_matrix/app/v1/thirdparty/user/{protocol}`. ([\#9361](https://github.com/matrix-org/synapse/issues/9361))
- Fix bug where Synapse would occasionally stop reconnecting to Redis after the connection was lost. ([\#9391](https://github.com/matrix-org/synapse/issues/9391))
- Fix a long-standing bug when upgrading a room: "TypeError: '>' not supported between instances of 'NoneType' and 'int'". ([\#9395](https://github.com/matrix-org/synapse/issues/9395))
- Reduce the amount of memory used when generating the URL preview of a file that is larger than the `max_spider_size`. ([\#9421](https://github.com/matrix-org/synapse/issues/9421))
- Fix a long-standing bug in the deduplication of old presence, resulting in no deduplication. ([\#9425](https://github.com/matrix-org/synapse/issues/9425))
- The `ui_auth.session_timeout` config option can now be specified in terms of number of seconds/minutes/etc. Contributed by Rishabh Arya. ([\#9426](https://github.com/matrix-org/synapse/issues/9426))
- Fix a bug introduced in v1.27.0: "TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'." related to the user directory. ([\#9428](https://github.com/matrix-org/synapse/issues/9428))
Updates to the Docker image
---------------------------
- Drop support for ARMv7 in Docker images. ([\#9433](https://github.com/matrix-org/synapse/issues/9433))
Improved Documentation
----------------------
- Reorganize CHANGELOG.md. ([\#9281](https://github.com/matrix-org/synapse/issues/9281))
- Add note to `auto_join_rooms` config option explaining existing rooms must be publicly joinable. ([\#9291](https://github.com/matrix-org/synapse/issues/9291))
- Correct name of Synapse's service file in TURN howto. ([\#9308](https://github.com/matrix-org/synapse/issues/9308))
- Fix the braces in the `oidc_providers` section of the sample config. ([\#9317](https://github.com/matrix-org/synapse/issues/9317))
- Update installation instructions on Fedora. ([\#9322](https://github.com/matrix-org/synapse/issues/9322))
- Add HTTP/2 support to the nginx example configuration. Contributed by David Vo. ([\#9390](https://github.com/matrix-org/synapse/issues/9390))
- Update docs for using Gitea as OpenID provider. ([\#9404](https://github.com/matrix-org/synapse/issues/9404))
- Document that pusher instances are shardable. ([\#9407](https://github.com/matrix-org/synapse/issues/9407))
- Fix erroneous documentation from v1.27.0 about updating the SAML2 callback URL. ([\#9434](https://github.com/matrix-org/synapse/issues/9434))
Deprecations and Removals
-------------------------
- Deprecate old admin API `GET /_synapse/admin/v1/users/<user_id>`. ([\#9429](https://github.com/matrix-org/synapse/issues/9429))
Internal Changes
----------------
- Fix 'object name reserved for internal use' errors with recent versions of SQLite. ([\#9003](https://github.com/matrix-org/synapse/issues/9003))
- Add experimental support for running Synapse with PyPy. ([\#9123](https://github.com/matrix-org/synapse/issues/9123))
- Deny access to additional IP addresses by default. ([\#9240](https://github.com/matrix-org/synapse/issues/9240))
- Update the `Cursor` type hints to better match PEP 249. ([\#9299](https://github.com/matrix-org/synapse/issues/9299))
- Add debug logging for SRV lookups. Contributed by @Bubu. ([\#9305](https://github.com/matrix-org/synapse/issues/9305))
- Improve logging for OIDC login flow. ([\#9307](https://github.com/matrix-org/synapse/issues/9307))
- Share the code for handling required attributes between the CAS and SAML handlers. ([\#9326](https://github.com/matrix-org/synapse/issues/9326))
- Clean up the code to load the metadata for OpenID Connect identity providers. ([\#9362](https://github.com/matrix-org/synapse/issues/9362))
- Convert tests to use `HomeserverTestCase`. ([\#9377](https://github.com/matrix-org/synapse/issues/9377), [\#9396](https://github.com/matrix-org/synapse/issues/9396))
- Update the version of black used to 20.8b1. ([\#9381](https://github.com/matrix-org/synapse/issues/9381))
- Allow OIDC config to override discovered values. ([\#9384](https://github.com/matrix-org/synapse/issues/9384))
- Remove some dead code from the acceptance of room invites path. ([\#9394](https://github.com/matrix-org/synapse/issues/9394))
- Clean up an unused method in the presence handler code. ([\#9408](https://github.com/matrix-org/synapse/issues/9408))
Synapse 1.27.0 (2021-02-16)
===========================
Note that this release includes a change in Synapse to use Redis as a cache ─ as well as a pub/sub mechanism ─ if Redis support is enabled for workers. No action is needed by server administrators, and we do not expect resource usage of the Redis instance to change dramatically.
This release also changes the callback URI for OpenID Connect (OIDC) and SAML2 identity providers. If your server is configured to use single sign-on via an OIDC/OAuth2 or SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
This release also changes the callback URI for OpenID Connect (OIDC) identity providers. If your server is configured to use single sign-on via an OIDC/OAuth2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
This release also changes escaping of variables in the HTML templates for SSO or email notifications. If you have customised these templates, please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.

View File

@@ -1,31 +1,4 @@
Welcome to Synapse
This document aims to get you started with contributing to this repo!
- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
- [2. What do I need?](#2-what-do-i-need)
- [3. Get the source.](#3-get-the-source)
- [4. Install the dependencies](#4-install-the-dependencies)
* [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
* [Under Windows](#under-windows)
- [5. Get in touch.](#5-get-in-touch)
- [6. Pick an issue.](#6-pick-an-issue)
- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
- [8. Test, test, test!](#8-test-test-test)
* [Run the linters.](#run-the-linters)
* [Run the unit tests.](#run-the-unit-tests)
* [Run the integration tests.](#run-the-integration-tests)
- [9. Submit your patch.](#9-submit-your-patch)
* [Changelog](#changelog)
+ [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
+ [Debian changelog](#debian-changelog)
* [Sign off](#sign-off)
- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
- [11. Find a new issue.](#11-find-a-new-issue)
- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
- [Conclusion](#conclusion)
# 1. Who can contribute to Synapse?
# Contributing code to Synapse
Everyone is welcome to contribute code to [matrix.org
projects](https://github.com/matrix-org), provided that they are willing to
@@ -36,179 +9,70 @@ license the code under the same terms as the project's overall 'outbound'
license - in our case, this is almost always Apache Software License v2 (see
[LICENSE](LICENSE)).
# 2. What do I need?
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
# 3. Get the source.
## How to contribute
The preferred and easiest way to contribute changes is to fork the relevant
project on GitHub, and then [create a pull request](
project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
changes into our repo.
Please base your changes on the `develop` branch.
Some other points to follow:
```sh
git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
git checkout develop
```
* Please base your changes on the `develop` branch.
If you need help getting started with git, this is beyond the scope of the document, but you
can find many good git tutorials on the web.
* Please follow the [code style requirements](#code-style).
# 4. Install the dependencies
* Please include a [changelog entry](#changelog) with each PR.
## Under Unix (macOS, Linux, BSD, ...)
* Please [sign off](#sign-off) your contribution.
Once you have installed Python 3 and added the source, please open a terminal and
set up a *virtualenv*, as follows:
* Please keep an eye on the pull request for feedback from the [continuous
integration system](#continuous-integration-and-testing) and try to fix any
errors that come up.
```sh
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,lint,mypy,test]"
pip install tox
```
* If you need to [update your PR](#updating-your-pull-request), just add new
commits to your branch rather than rebasing.
This will install the developer dependencies for the project.
## Under Windows
TBD
# 5. Get in touch.
Join our developer community on Matrix: #synapse-dev:matrix.org !
# 6. Pick an issue.
Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
to work on.
# 7. Turn coffee and documentation into code and documentation!
## Code style
Synapse's code style is documented [here](docs/code_style.md). Please follow
it, including the conventions for the [sample configuration
file](docs/code_style.md#configuration-file-format).
There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
Many of the conventions are enforced by scripts which are run as part of the
[continuous integration system](#continuous-integration-and-testing). To help
check if you have followed the code style, you can run `scripts-dev/lint.sh`
locally. You'll need python 3.6 or later, and to install a number of tools:
If you add new files to either of these folders, please use [GitHub-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/).
```
# Install the dependencies
pip install -e ".[lint,mypy]"
Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
# 8. Test, test, test!
<a name="test-test-test"></a>
While you're developing and before submitting a patch, you'll
want to test your code.
## Run the linters.
The linters look at your code and do two things:
- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.
They're pretty fast, don't hesitate!
```sh
source ./env/bin/activate
# Run the linter script
./scripts-dev/lint.sh
```
Note that this script *will modify your files* to fix styling errors.
Make sure that you have saved all your files.
**Note that the script does not just test/check, but also reformats code, so you
may wish to ensure any new code is committed first**.
If you wish to restrict the linters to only the files changed since the last commit
(much faster!), you can instead run:
By default, this script checks all files and can take some time; if you alter
only certain files, you might wish to specify paths as arguments to reduce the
run-time:
```sh
source ./env/bin/activate
./scripts-dev/lint.sh -d
```
Or if you know exactly which files you wish to lint, you can instead run:
```sh
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
## Run the unit tests.
You can also provide the `-d` option, which will lint the files that have been
changed since the last git commit. This will often be significantly faster than
linting the whole codebase.
The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.
```sh
source ./env/bin/activate
trial tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
```sh
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```
If your tests fail, you may wish to look at the logs:
```sh
less _trial_temp/test.log
```
## Run the integration tests.
The integration tests are a more comprehensive suite of tests. They
run a full version of Synapse, including your changes, to check if
anything was broken. They are slower than the unit tests but will
typically catch more errors.
The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
```
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
# 9. Submit your patch.
Once you're happy with your patch, it's time to prepare a Pull Request.
To prepare a Pull Request, please:
1. verify that [all the tests pass](#test-test-test), including the coding style;
2. [sign off](#sign-off) your contribution;
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
## Changelog
@@ -292,6 +156,24 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
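
For reference, a newsfragment is a one-line file in `changelog.d/` named `<PR number>.<type>`, exactly like the `changelog.d` entries listed further down this page; creating one is a single command:

```sh
# The newsfile for this very PR (see changelog.d/9423.bugfix below):
echo "Fix building docker images for 32-bit ARM." > changelog.d/9423.bugfix
```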
## Documentation
There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
New files added to both folders should be written in [Github-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/), and attempts
should be made to migrate existing documents to markdown where possible.
Some documentation also exists in [Synapse's Github
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
## Sign off
In order to have a concrete record that your contribution is intentional
@@ -358,36 +240,47 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
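
As noted above, `git commit -s` adds the sign-off automatically from your git config; a minimal sketch (names are placeholders):

```sh
git config user.name "Your Name"
git config user.email "you@example.org"
git commit -s -m "Fix building docker images for 32-bit ARM"
# appends: Signed-off-by: Your Name <you@example.org>
```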
## Continuous integration and testing
# 10. Turn feedback into better code.
[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
run a series of checks and tests against any PR which is opened against the
project; if your change breaks the build, this will be shown in GitHub, with
links to the build results. If your build fails, please try to fix the errors
and update your branch.
Once the Pull Request is opened, you will see a few things:
To run unit tests in a local development environment, you can use:
1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
2. one or more of the developers will take a look at your Pull Request and offer feedback.
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.
From this point, you should:
Docker images are available for running the integration tests (SyTest) locally,
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
information.
1. Look at the results of the CI pipeline.
- If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
3. Create a new commit with the changes.
- Please do NOT overwrite the history. New commits make the reviewer's life easier.
- Push these commits to your Pull Request.
4. Back to 1.
## Updating your pull request
Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
If you decide to make changes to your pull request - perhaps to address issues
raised in a review, or to fix problems highlighted by [continuous
integration](#continuous-integration-and-testing) - just add new commits to your
branch, and push to GitHub. The pull request will automatically be updated.
# 11. Find a new issue.
Please **avoid** rebasing your branch, especially once the PR has been
reviewed: doing so makes it very difficult for a reviewer to see what has
changed since a previous review.
By now, you know the drill!
# Notes for maintainers on merging PRs etc
## Notes for maintainers on merging PRs etc
There are some notes for those with commit access to the project on how we
manage git [here](docs/dev/git.md).
# Conclusion
## Conclusion
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully

View File

@@ -20,10 +20,9 @@ recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
include tests/http/ca.crt
include tests/http/ca.key
include tests/http/server.key
recursive-include synapse/res *
recursive-include synapse/static *.css

View File

@@ -183,9 +183,8 @@ Using a reverse proxy with Synapse
It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
`HAProxy <https://www.haproxy.org/>`_ or
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.

View File

@@ -85,51 +85,23 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
Upgrading to v1.29.0
====================
Requirement for X-Forwarded-Proto header
----------------------------------------
When using Synapse with a reverse proxy (in particular, when using the
`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
will log a warning on each received request.
To avoid the warning, administrators using a reverse proxy should ensure that
the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
indicate the protocol used by the client. See the `reverse proxy documentation
<docs/reverse_proxy.md>`_, where the example configurations have been updated to
show how to set this header.
(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
sets `X-Forwarded-Proto` by default.)
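
To see the behaviour described above in isolation, one can send what the reverse proxy is expected to send (the hostname and port here are assumptions, and this only matters on listeners with ``x_forwarded`` enabled)::

    # Without the header, Synapse logs a warning for the request; with it, none.
    curl -H 'X-Forwarded-Proto: https' http://localhost:8008/_matrix/client/versions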
Upgrading to v1.27.0
====================
Changes to callback URI for OAuth2 / OpenID Connect and SAML2
-------------------------------------------------------------
Changes to callback URI for OAuth2 / OpenID Connect
---------------------------------------------------
This version changes the URI used for callbacks from OAuth2 and SAML2 identity providers:
This version changes the URI used for callbacks from OAuth2 identity providers. If
your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
to the list of permitted "redirect URIs" at the identity provider.
* If your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
to the list of permitted "redirect URIs" at the identity provider.
See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
Connect.
See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
Connect.
* If your server is configured for single sign-on via a SAML2 identity provider, you will
need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
"ACS location" (also known as "allowed callback URLs") at the identity provider.
The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
provider uses this property to validate or otherwise identify Synapse, its configuration
will need to be updated to use the new URL. Alternatively you could create a new, separate
"EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
the existing "EntityDescriptor" as they were.
(Note: a similar change is being made for SAML2; in this case the old URI
``[synapse public baseurl]/_matrix/saml2`` is being deprecated, but will continue to
work, so no immediate changes are required for existing installations.)
Changes to HTML templates
-------------------------

1
changelog.d/9003.misc Normal file
View File

@@ -0,0 +1 @@
Fix 'object name reserved for internal use' errors with recent versions of SQLite.

1
changelog.d/9123.misc Normal file
View File

@@ -0,0 +1 @@
Add experimental support for running Synapse with PyPy.

1
changelog.d/9150.feature Normal file
View File

@@ -0,0 +1 @@
New API /_synapse/admin/rooms/{roomId}/context/{eventId}.

1
changelog.d/9240.misc Normal file
View File

@@ -0,0 +1 @@
Deny access to additional IP addresses by default.

1
changelog.d/9257.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix long-standing bug where sending email push would fail for rooms that the server had since left.

1
changelog.d/9291.doc Normal file
View File

@@ -0,0 +1 @@
Add note to `auto_join_rooms` config option explaining existing rooms must be publicly joinable.

1
changelog.d/9296.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix bug in Synapse 1.27.0rc1 which meant the "session expired" error page during SSO registration was badly formatted.

1
changelog.d/9299.misc Normal file
View File

@@ -0,0 +1 @@
Update the `Cursor` type hints to better match PEP 249.

1
changelog.d/9300.feature Normal file
View File

@@ -0,0 +1 @@
Further improvements to the user experience of registration via single sign-on.

1
changelog.d/9301.feature Normal file
View File

@@ -0,0 +1 @@
Further improvements to the user experience of registration via single sign-on.

1
changelog.d/9305.misc Normal file
View File

@@ -0,0 +1 @@
Add debug logging for SRV lookups. Contributed by @Bubu.

1
changelog.d/9307.misc Normal file
View File

@@ -0,0 +1 @@
Improve logging for OIDC login flow.

1
changelog.d/9308.doc Normal file
View File

@@ -0,0 +1 @@
Correct name of Synapse's service file in TURN howto.

1
changelog.d/9311.feature Normal file
View File

@@ -0,0 +1 @@
Add hook to spam checker modules that allow checking file uploads and remote downloads.

1
changelog.d/9317.doc Normal file
View File

@@ -0,0 +1 @@
Fix the braces in the `oidc_providers` section of the sample config.

1
changelog.d/9321.bugfix Normal file
View File

@@ -0,0 +1 @@
Assert a maximum length for the `client_secret` parameter for spec compliance.

1
changelog.d/9322.doc Normal file
View File

@@ -0,0 +1 @@
Update installation instructions on Fedora.

1
changelog.d/9326.misc Normal file
View File

@@ -0,0 +1 @@
Share the code for handling required attributes between the CAS and SAML handlers.

1
changelog.d/9333.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix additional errors when previewing URLs: "AttributeError 'NoneType' object has no attribute 'xpath'" and "ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.".

1
changelog.d/9361.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix a bug causing Synapse to impose the wrong type constraints on fields when processing responses from appservices to `/_matrix/app/v1/thirdparty/user/{protocol}`.

1
changelog.d/9362.misc Normal file
View File

@@ -0,0 +1 @@
Clean up the code to load the metadata for OpenID Connect identity providers.

1
changelog.d/9376.feature Normal file
View File

@@ -0,0 +1 @@
Add support for receiving OpenID Connect authentication responses via form `POST`s rather than `GET`s.

1
changelog.d/9377.misc Normal file
View File

@@ -0,0 +1 @@
Convert tests to use `HomeserverTestCase`.

1
changelog.d/9381.misc Normal file
View File

@@ -0,0 +1 @@
Update the version of black used to 20.8b1.

1
changelog.d/9384.misc Normal file
View File

@@ -0,0 +1 @@
Allow OIDC config to override discovered values.

1
changelog.d/9391.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix bug where Synapse would occasionally stop reconnecting after the connection was lost.

1
changelog.d/9394.misc Normal file
View File

@@ -0,0 +1 @@
Remove some dead code from the acceptance of room invites path.

1
changelog.d/9395.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix a long-standing bug when upgrading a room: "TypeError: '>' not supported between instances of 'NoneType' and 'int'".

1
changelog.d/9396.misc Normal file
View File

@@ -0,0 +1 @@
Convert tests to use `HomeserverTestCase`.

1
changelog.d/9404.doc Normal file
View File

@@ -0,0 +1 @@
Update docs for using Gitea as OpenID provider.

1
changelog.d/9407.doc Normal file
View File

@@ -0,0 +1 @@
Document that pusher instances are shardable.

1
changelog.d/9423.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix building docker images for 32-bit ARM.

View File

@@ -58,10 +58,10 @@ trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
# build the config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
@@ -87,7 +87,7 @@ PYTHONPATH="$tmpdir" \
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# build the log config file
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
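
The `-B` flag shown in the second variant of each command above stops Python from writing `.pyc` bytecode files (the debian changelog further down records its removal in 1.29.0). Its effect is easy to confirm:

```sh
python3 -B -c 'import sys; print(sys.dont_write_bytecode)'  # True
python3 -c 'import sys; print(sys.dont_write_bytecode)'     # False
```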

22
debian/changelog vendored
View File

@@ -1,25 +1,3 @@
matrix-synapse-py3 (1.30.0) stable; urgency=medium
* New synapse release 1.30.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 22 Mar 2021 13:15:34 +0000
matrix-synapse-py3 (1.29.0) stable; urgency=medium
[ Jonathan de Jong ]
* Remove the python -B flag (don't generate bytecode) in scripts and documentation.
[ Synapse Packaging team ]
* New synapse release 1.29.0.
-- Synapse Packaging team <packages@matrix.org> Mon, 08 Mar 2021 13:51:50 +0000
matrix-synapse-py3 (1.28.0) stable; urgency=medium
* New synapse release 1.28.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Feb 2021 10:21:57 +0000
matrix-synapse-py3 (1.27.0) stable; urgency=medium
[ Dan Callahan ]

2
debian/synctl.1 vendored
View File

@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
.
.nf
$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
.
.fi
.

2
debian/synctl.ronn vendored
View File

@@ -41,7 +41,7 @@ process.
Configuration file may be generated as follows:
$ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
$ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
## ENVIRONMENT

View File

@@ -12,11 +12,13 @@
#
ARG PYTHON_VERSION=3.8
ARG BASE_IMAGE=docker.io/python:${PYTHON_VERSION}-slim
ARG CARGO_NET_OFFLINE=false
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
FROM ${BASE_IMAGE} as builder
# install the OS build deps
RUN apt-get update && apt-get install -y \
@@ -32,9 +34,16 @@ RUN apt-get update && apt-get install -y \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*
ENV CARGO_NET_OFFLINE=${CARGO_NET_OFFLINE}
# Build dependencies that are not available as wheels, to speed up rebuilds
RUN pip install --prefix="/install" --no-warn-script-location \
cryptography \
lxml
RUN pip install --prefix="/install" --no-warn-script-location \
cryptography
RUN pip install --prefix="/install" --no-warn-script-location \
frozendict \
jaeger-client \
opentracing \
@@ -69,7 +78,6 @@ RUN apt-get update && apt-get install -y \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /install /usr/local

View File

@@ -0,0 +1,21 @@
# A docker file that caches the cargo index for the cryptography deps. This is
# mainly useful for multi-arch builds where fetching the index from the internet
# fails for 32bit archs built on 64 bit platforms.
ARG PYTHON_VERSION=3.8
FROM --platform=$BUILDPLATFORM docker.io/python:${PYTHON_VERSION}-slim as builder
RUN apt-get update && apt-get install -y \
rustc \
&& rm -rf /var/lib/apt/lists/*
RUN pip download --no-binary cryptography --no-deps cryptography
RUN tar -xf cryptography*.tar.gz --wildcards cryptography*/src/rust/
RUN cd cryptography*/src/rust && cargo fetch
FROM docker.io/python:${PYTHON_VERSION}-slim
COPY --from=builder /root/.cargo /root/.cargo
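
To see how the two Dockerfiles fit together: the image built above carries only a warmed `/root/.cargo` index, which the main Dockerfile consumes through its new `BASE_IMAGE` and `CARGO_NET_OFFLINE` build args. A single-arch local sketch (tag names are illustrative):

```sh
# Build the cache image once; it fetches the crates index for cryptography.
docker build -f docker/Dockerfile-cargo-cache -t cargo_cache .

# Build Synapse on top of it, keeping cargo offline so the cached index is used.
docker build -f docker/Dockerfile \
    --build-arg BASE_IMAGE=cargo_cache \
    --build-arg CARGO_NET_OFFLINE=true \
    -t synapse:local .
```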

View File

@@ -11,6 +11,7 @@ The image also does *not* provide a TURN server.
By default, the image expects a single volume, located at ``/data``, that will hold:
* configuration files;
* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.
@@ -204,8 +205,3 @@ healthcheck:
timeout: 10s
retries: 3
```
## Using jemalloc
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse [README](../README.md)

View File

@@ -89,6 +89,7 @@ federation_rc_concurrent: 3
## Files ##
media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false

View File

@@ -3,7 +3,6 @@
import codecs
import glob
import os
import platform
import subprocess
import sys
@@ -214,13 +213,6 @@ def main(args, environ):
if "-m" not in args:
args = ["-m", synapse_worker] + args
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log("Could not find %s, will not use" % (jemallocpath,))
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
@@ -256,9 +248,9 @@ running with 'migrate_config'. See the README for more details.
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
os.execv("/usr/sbin/gosu", args)
else:
os.execve("/usr/local/bin/python", args, environ)
os.execv("/usr/local/bin/python", args)
if __name__ == "__main__":
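
For context, the deleted logic preloaded jemalloc when the library was present; the same effect can be had by hand (the library path below is the amd64 one and is illustrative):

```sh
# What the removed start.py code did: preload jemalloc if the .so exists.
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 \
    python -m synapse.app.homeserver --config-path /data/homeserver.yaml
```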

View File

@@ -1,7 +1,5 @@
# Contents
- [Querying media](#querying-media)
* [List all media in a room](#list-all-media-in-a-room)
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [List all media in a room](#list-all-media-in-a-room)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
@@ -12,11 +10,7 @@
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
- [Purge Remote Media API](#purge-remote-media-api)
# Querying media
These APIs allow extracting media information from the homeserver.
## List all media in a room
# List all media in a room
This API gets a list of known media in a room.
However, it only shows media from unencrypted events or rooms.
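
A hedged example call (the server name and access token are placeholders):

```sh
curl --header "Authorization: Bearer <access_token>" \
    'https://<server>/_synapse/admin/v1/room/<room_id>/media'
```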
@@ -42,12 +36,6 @@ The API returns a JSON body like the following:
}
```
## List all media uploaded by a user
Listing all media that has been uploaded by a local user can be achieved through
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
Admin API.
# Quarantine media
Quarantining media means that it is marked as inaccessible by users. It applies

View File

@@ -29,9 +29,8 @@ It returns a JSON body like the following:
}
],
"avatar_url": "<avatar_url>",
"admin": 0,
"deactivated": 0,
"shadow_banned": 0,
"admin": false,
"deactivated": false,
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
"creation_ts": 1560432506,
"appservice_id": null,
@@ -151,7 +150,6 @@ A JSON body is returned with the following shape:
"admin": 0,
"user_type": null,
"deactivated": 0,
"shadow_banned": 0,
"displayname": "<User One>",
"avatar_url": null
}, {
@@ -160,7 +158,6 @@ A JSON body is returned with the following shape:
"admin": 1,
"user_type": null,
"deactivated": 0,
"shadow_banned": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>"
}
@@ -265,7 +262,7 @@ The following actions are performed when deactivating a user:
- Reject all pending invites
- Remove all account validity information related to the user
The following additional actions are performed during deactivation if ``erase``
The following additional actions are performed during deactivation if``erase``
is set to ``true``:
- Remove the user's display name
@@ -379,12 +376,11 @@ The following fields are returned in the JSON response body:
- ``total`` - Number of rooms.
List media of a user
====================
List media of an user
================================
Gets a list of all local media that a specific ``user_id`` has created.
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
``order_by`` and ``dir``.
The response is ordered by creation date descending and media ID descending.
The newest media is on top.
The API is::
@@ -441,35 +437,6 @@ The following parameters should be set in the URL:
denoting the offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
Defaults to ``0``.
- ``order_by`` - The method by which to sort the returned list of media.
If the ordered field has duplicates, the second order is always by ascending ``media_id``,
which guarantees a stable ordering. Valid values are:
- ``media_id`` - Media are ordered alphabetically by ``media_id``.
- ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with.
- ``created_ts`` - Media are ordered by when the content was uploaded in ms.
Smallest to largest. This is the default.
- ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
Smallest to largest.
- ``media_length`` - Media are ordered by length of the media in bytes.
Smallest to largest.
- ``media_type`` - Media are ordered alphabetically by MIME-type.
- ``quarantined_by`` - Media are ordered alphabetically by the user ID that
initiated the quarantine request for this media.
- ``safe_from_quarantine`` - Media are ordered by the status if this media is safe
from quarantining.
- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).
Caution: the database only has indexes on the columns ``media_id``,
``user_id`` and ``created_ts``, so requesting any other sort order
(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
``quarantined_by`` or ``safe_from_quarantine``) can put a large load on the
database, especially for large environments.
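For example, to fetch a user's ten largest media items (largest first), combine
``order_by`` and ``dir``. A minimal sketch in Python (the homeserver URL,
admin access token and user ID are placeholders)::

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_admin_token"  # placeholder: an admin user's access token
    USER_ID = "@alice:example.com"  # placeholder

    resp = requests.get(
        f"{HOMESERVER}/_synapse/admin/v1/users/{USER_ID}/media",
        params={"order_by": "media_length", "dir": "b", "limit": 10},
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    )
    resp.raise_for_status()
    for item in resp.json().get("media", []):
        print(item["media_id"], item["media_length"])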
**Response**

View File

@@ -226,7 +226,7 @@ Synapse config:
oidc_providers:
- idp_id: github
idp_name: Github
idp_brand: "github" # optional: styling hint for clients
idp_brand: "org.matrix.github" # optional: styling hint for clients
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
@@ -252,7 +252,7 @@ oidc_providers:
oidc_providers:
- idp_id: google
idp_name: Google
idp_brand: "google" # optional: styling hint for clients
idp_brand: "org.matrix.google" # optional: styling hint for clients
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
@@ -299,7 +299,7 @@ Synapse config:
oidc_providers:
- idp_id: gitlab
idp_name: Gitlab
idp_brand: "gitlab" # optional: styling hint for clients
idp_brand: "org.matrix.gitlab" # optional: styling hint for clients
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
@@ -334,7 +334,7 @@ Synapse config:
```yaml
- idp_id: facebook
idp_name: Facebook
idp_brand: "facebook" # optional: styling hint for clients
idp_brand: "org.matrix.facebook" # optional: styling hint for clients
discover: false
issuer: "https://facebook.com"
client_id: "your-client-id" # TO BE FILLED
@@ -386,7 +386,7 @@ oidc_providers:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.full_name }}"
display_name_template: "{{ user.full_name }}"
```
### XWiki
@@ -401,7 +401,8 @@ oidc_providers:
idp_name: "XWiki"
issuer: "https://myxwikihost/xwiki/oidc/"
client_id: "your-client-id" # TO BE FILLED
client_auth_method: none
# Needed until https://github.com/matrix-org/synapse/issues/9212 is fixed
client_secret: "dontcare"
scopes: ["openid", "profile"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
@@ -409,40 +410,3 @@ oidc_providers:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```
## Apple
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.
As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.
https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
documentation on setting up SiWA.
The synapse config will look like this:
```yaml
- idp_id: apple
idp_name: Apple
issuer: "https://appleid.apple.com"
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
client_auth_method: "client_secret_post"
client_secret_jwt_key:
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
jwt_header:
alg: ES256
kid: "KEYIDCODE" # Set to the 10-char Key ID
jwt_payload:
iss: TEAMIDCODE # Set to the 10-char Team ID
scopes: ["name", "email", "openid"]
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
user_mapping_provider:
config:
email_template: "{{ user.email }}"
```
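Under the hood, the `client_secret_jwt_key` block is used to mint a short-lived JWT that Apple accepts in place of a static client secret. A rough standalone sketch of that token, using `authlib` (which Synapse already depends on for OIDC); the exact claims Synapse sets may differ:

```python
import time
from authlib.jose import jwt

TEAM_ID = "TEAMIDCODE"  # placeholder: your 10-char Team ID
KEY_ID = "KEYIDCODE"  # placeholder: your 10-char Key ID
CLIENT_ID = "your-client-id"  # placeholder: the "Services ID" identifier

with open("/path/to/AuthKey_KEYIDCODE.p8") as f:
    private_key = f.read()

header = {"alg": "ES256", "kid": KEY_ID}
now = int(time.time())
payload = {
    "iss": TEAM_ID,
    "iat": now,
    "exp": now + 3600,
    "aud": "https://appleid.apple.com",
    "sub": CLIENT_ID,
}
# authlib signs the ES256 JWT with the PEM key read above.
client_secret = jwt.encode(header, payload, private_key).decode("ascii")
print(client_secret)
```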

View File

@@ -3,31 +3,30 @@
It is recommended to put a reverse proxy such as
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
[HAProxy](https://www.haproxy.org/) or
[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
privileges.
You should configure your reverse proxy to forward requests to `/_matrix` or
`/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and
`X-Forwarded-Proto` request headers.
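Once the proxy is in place, an unauthenticated client endpoint makes a quick smoke test (a sketch using the example hostname from this page):

```python
import requests

# /_matrix/client/versions needs no access token, so a 200 response
# here means the proxy is forwarding requests through to Synapse.
resp = requests.get("https://matrix.example.com/_matrix/client/versions")
print(resp.status_code, resp.json()["versions"][:3])
```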
You should remember that Matrix clients and other Matrix servers do not
necessarily need to connect to your server via the same server name or
port. Indeed, clients will use port 443 by default, whereas servers default to
port 8448. Where these are different, we refer to the 'client port' and the
'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
the requested URI in any way (for example, by decoding `%xx` escapes).
Beware that Apache *will* canonicalise URIs unless you specify
`nocanon`.
When setting up a reverse proxy, remember that Matrix clients and other
Matrix servers do not necessarily need to connect to your server via the
same server name or port. Indeed, clients will use port 443 by default,
whereas servers default to port 8448. Where these are different, we
refer to the 'client port' and the 'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.
Endpoints that are part of the standardised Matrix specification are
located under `/_matrix`, whereas endpoints specific to Synapse are
located under `/_synapse/client`.
Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at
`https://example.com:8448`. The following sections detail the configuration of
@@ -41,21 +40,18 @@ the reverse proxy and the homeserver.
```
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
listen 443 ssl;
listen [::]:443 ssl;
# For the federation port
listen 8448 ssl http2 default_server;
listen [::]:8448 ssl http2 default_server;
listen 8448 ssl default_server;
listen [::]:8448 ssl default_server;
server_name matrix.example.com;
location ~* ^(\/_matrix|\/_synapse\/client) {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
@@ -106,7 +102,6 @@ example.com:8448 {
SSLEngine on
ServerName matrix.example.com;
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
@@ -118,7 +113,6 @@ example.com:8448 {
SSLEngine on
ServerName example.com;
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
@@ -140,9 +134,6 @@ example.com:8448 {
```
frontend https
bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]
# Matrix client traffic
acl matrix-host hdr(host) -i matrix.example.com
@@ -153,62 +144,12 @@ frontend https
frontend matrix-federation
bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]
default_backend matrix
backend matrix
server matrix 127.0.0.1:8008
```
### Relayd
```
table <webserver> { 127.0.0.1 }
table <matrixserver> { 127.0.0.1 }
http protocol "https" {
tls { no tlsv1.0, ciphers "HIGH" }
tls keypair "example.com"
match header set "X-Forwarded-For" value "$REMOTE_ADDR"
match header set "X-Forwarded-Proto" value "https"
# set CORS header for .well-known/matrix/server, .well-known/matrix/client
# httpd does not support setting headers, so do it here
match request path "/.well-known/matrix/*" tag "matrix-cors"
match response tagged "matrix-cors" header set "Access-Control-Allow-Origin" value "*"
pass quick path "/_matrix/*" forward to <matrixserver>
pass quick path "/_synapse/client/*" forward to <matrixserver>
# pass on non-matrix traffic to webserver
pass forward to <webserver>
}
relay "https_traffic" {
listen on egress port 443 tls
protocol "https"
forward to <matrixserver> port 8008 check tcp
forward to <webserver> port 8080 check tcp
}
http protocol "matrix" {
tls { no tlsv1.0, ciphers "HIGH" }
tls keypair "example.com"
block
pass quick path "/_matrix/*" forward to <matrixserver>
pass quick path "/_synapse/client/*" forward to <matrixserver>
}
relay "matrix_federation" {
listen on egress port 8448 tls
protocol "matrix"
forward to <matrixserver> port 8008 check tcp
}
```
## Homeserver Configuration
You will also want to set `bind_addresses: ['127.0.0.1']` and

View File

@@ -89,7 +89,8 @@ pid_file: DATADIR/homeserver.pid
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, unless allow_profile_lookup_over_federation is set to false.
# API, so this setting is of limited value if federation is enabled on
# the server.
#
#require_auth_for_profile_requests: true
@@ -100,14 +101,6 @@ pid_file: DATADIR/homeserver.pid
#
#limit_profile_requests_to_users_who_share_rooms: true
# Uncomment to prevent a user's profile data from being retrieved and
# displayed in a room until they have joined it. By default, a user's
# profile data is included in an invite event, regardless of the values
# of the above two settings, and whether or not the users share a server.
# Defaults to 'true'.
#
#include_profile_data_on_invite: false
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
@@ -706,12 +699,6 @@ acme:
# - matrix.org
# - example.com
# Uncomment to disable profile lookup over federation. By default, the
# Federation API allows other homeservers to obtain profile data of any user
# on this homeserver. Defaults to 'true'.
#
#allow_profile_lookup_over_federation: false
## Caching ##
@@ -1779,26 +1766,7 @@ saml2_config:
#
# client_id: Required. oauth2 client id to use.
#
# client_secret: oauth2 client secret to use. May be omitted if
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
# client_secret_jwt_key: Alternative to client_secret: details of a key used
# to create a JSON Web Token to be used as an OAuth2 client secret. If
# given, must be a dictionary with the following properties:
#
# key: a pem-encoded signing key. Must be a suitable key for the
# algorithm specified. Required unless 'key_file' is given.
#
# key_file: the path to a file containing a pem-encoded signing key.
# Required unless 'key' is given.
#
# jwt_header: a dictionary giving properties to include in the JWT
# header. Must include the key 'alg', giving the algorithm used to
# sign the JWT, such as "ES256", using the JWA identifiers in
# RFC7518.
#
# jwt_payload: an optional dictionary giving properties to include in
# the JWT payload. Normally this should include an 'iss' key.
# client_secret: Required. oauth2 client secret to use.
#
# client_auth_method: auth method to use when exchanging the token. Valid
# values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -1919,7 +1887,7 @@ oidc_providers:
#
#- idp_id: github
# idp_name: Github
# idp_brand: github
# idp_brand: org.matrix.github
# discover: false
# issuer: "https://github.com/"
# client_id: "your-client-id" # TO BE FILLED
@@ -2260,8 +2228,8 @@ password_config:
#require_uppercase: true
ui_auth:
# The amount of time to allow a user-interactive authentication session
# to be active.
# The number of milliseconds to allow a user-interactive authentication
# session to be active.
#
# This defaults to 0, meaning the user is queried for their credentials
# before every action, but this can be overridden to allow a single
@@ -2272,7 +2240,7 @@ ui_auth:
# Uncomment below to allow for credential validation to last for 15
# seconds.
#
#session_timeout: "15s"
#session_timeout: 15000
# Configuration for sending emails from Synapse.
@@ -2562,35 +2530,19 @@ spam_checker:
# User Directory configuration
#
user_directory:
# Defines whether users can search the user directory. If false then
# empty responses are returned to all queries. Defaults to true.
#
# Uncomment to disable the user directory.
#
#enabled: false
# Defines whether to search all users visible to your HS when searching
# the user directory, rather than limiting to users visible in public
# rooms. Defaults to false.
#
# If you set it true, you'll have to rebuild the user_directory search
# indexes, see:
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
# Uncomment to return search results containing all known users, even if that
# user does not share a room with the requester.
#
#search_all_users: true
# Defines whether to prefer local users in search query results.
# If True, local users are more likely to appear above remote users
# when searching the user directory. Defaults to false.
#
# Uncomment to prefer local over remote users in user directory search
# results.
#
#prefer_local_users: true
# 'enabled' defines whether users can search the user directory. If
# false then empty responses are returned to all queries. Defaults to
# true.
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
#user_directory:
# enabled: true
# search_all_users: false
# User Consent configuration
@@ -2645,20 +2597,19 @@ user_directory:
# Settings for local room and user statistics collection. See
# docs/room_and_user_statistics.md.
# Local statistics collection. Used in populating the room directory.
#
stats:
# Uncomment the following to disable room and user statistics. Note that doing
# so may cause certain features (such as the room directory) not to work
# correctly.
#
#enabled: false
# The size of each timeslice in the room_stats_historical and
# user_stats_historical tables, as a time period. Defaults to "1d".
#
#bucket_size: 1h
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#
#stats:
# enabled: true
# bucket_size: 1d
# retention: 1y
# Server Notices room configuration

View File

@@ -14,7 +14,6 @@ The Python class is instantiated with two objects:
* An instance of `synapse.module_api.ModuleApi`.
It then implements methods which return a boolean to alter behavior in Synapse.
All the methods must be defined.
There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:
@@ -25,18 +24,13 @@ well as some specific methods:
* `user_may_publish_room`
* `check_username_for_spam`
* `check_registration_for_spam`
* `check_media_file_for_spam`
The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `ModuleApi` class provides a way for the custom spam checker class to
call back into the homeserver internals.
Additionally, a `parse_config` method is mandatory and receives the plugin config
dictionary. After parsing, it must return an object which will be
passed to `__init__` later.
### Example
```python
@@ -47,10 +41,6 @@ class ExampleSpamChecker:
self.config = config
self.api = api
@staticmethod
def parse_config(config):
return config
async def check_event_for_spam(self, foo):
return False # allow all events
@@ -69,13 +59,7 @@ class ExampleSpamChecker:
async def check_username_for_spam(self, user_profile):
return False # allow all usernames
async def check_registration_for_spam(
self,
email_threepid,
username,
request_info,
auth_provider_id,
):
async def check_registration_for_spam(self, email_threepid, username, request_info):
return RegistrationBehaviour.ALLOW # allow all registrations
async def check_media_file_for_spam(self, file_wrapper, file_info):

View File

@@ -4,7 +4,6 @@ AssertPathExists=/etc/matrix-synapse/workers/%i.yaml
# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
ReloadPropagatedFrom=matrix-synapse.target
# if this is started at the same time as the main, let the main process start
# first, to initialise the database schema.

View File

@@ -3,7 +3,6 @@ Description=Synapse master
# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
ReloadPropagatedFrom=matrix-synapse.target
[Service]
Type=notify

View File

@@ -220,6 +220,10 @@ Asks the server for the current position of all streams.
Acknowledge receipt of some federation data
#### REMOVE_PUSHER (C)
Inform the server a pusher should be removed
#### REMOTE_SERVER_UP (S, C)
Inform other processes that a remote server may have come back online.
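For a sense of the wire format, commands travel as newline-delimited `NAME <data>` lines. A toy encoder (a sketch only, not the actual implementation):

```python
def encode_command(name: str, data: str) -> bytes:
    # Each replication command is a single "NAME data" line.
    return f"{name} {data}\n".encode("utf-8")

# e.g. announcing that a remote server appears to be back online:
assert encode_command("REMOTE_SERVER_UP", "example.com") == b"REMOTE_SERVER_UP example.com\n"
```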

View File

@@ -276,8 +276,7 @@ using):
Ensure that all SSO logins go to a single process.
For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).
[#7530](https://github.com/matrix-org/synapse/issues/7530).
Note that a HTTP listener with `client` and `federation` resources must be
configured in the `worker_listeners` option in the worker config.

View File

@@ -23,7 +23,6 @@ files =
synapse/events/validator.py,
synapse/events/spamcheck.py,
synapse/federation,
synapse/groups,
synapse/handlers,
synapse/http/client.py,
synapse/http/federation/matrix_federation_agent.py,
@@ -69,7 +68,6 @@ files =
synapse/util/async_helpers.py,
synapse/util/caches,
synapse/util/metrics.py,
synapse/util/macaroons.py,
synapse/util/stringutils.py,
tests/replication,
tests/test_utils,
@@ -117,6 +115,9 @@ ignore_missing_imports = True
[mypy-saml2.*]
ignore_missing_imports = True
[mypy-unpaddedbase64]
ignore_missing_imports = True
[mypy-canonicaljson]
ignore_missing_imports = True

View File

@@ -2,14 +2,9 @@
# Find linting errors in Synapse's default config file.
# Exits with 0 if there are no problems, or another code otherwise.
# cd to the root of the repository
cd `dirname $0`/..
# Restore backup of sample config upon script exit
trap "mv docs/sample_config.yaml.bak docs/sample_config.yaml" EXIT
# Fix non-lowercase true/false values
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
rm docs/sample_config.yaml.bak
# Check if anything changed
diff docs/sample_config.yaml docs/sample_config.yaml.bak
git diff --exit-code docs/sample_config.yaml

View File

@@ -22,7 +22,7 @@ import logging
import sys
import time
import traceback
from typing import Dict, Iterable, Optional, Set
from typing import Dict, Optional, Set
import yaml
@@ -47,7 +47,6 @@ from synapse.storage.databases.main.events_bg_updates import (
from synapse.storage.databases.main.media_repository import (
MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
find_max_generated_user_id_localpart,
@@ -178,7 +177,6 @@ class Store(
UserDirectoryBackgroundUpdateStore,
EndToEndKeyBackgroundStore,
StatsStore,
PusherWorkerStore,
):
def execute(self, f, *args, **kwargs):
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -631,13 +629,7 @@ class Porter(object):
await self._setup_state_group_id_seq()
await self._setup_user_id_seq()
await self._setup_events_stream_seqs()
await self._setup_sequence(
"device_inbox_sequence", ("device_inbox", "device_federation_outbox")
)
await self._setup_sequence(
"account_data_sequence", ("room_account_data", "room_tags_revisions", "account_data"))
await self._setup_sequence("receipts_sequence", ("receipts_linearized", ))
await self._setup_auth_chain_sequence()
await self._setup_device_inbox_seq()
# Step 3. Get tables.
self.progress.set_state("Fetching tables")
@@ -862,7 +854,7 @@ class Porter(object):
return done, remaining + done
async def _setup_state_group_id_seq(self) -> None:
async def _setup_state_group_id_seq(self):
curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
)
@@ -876,7 +868,7 @@ class Porter(object):
await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r)
async def _setup_user_id_seq(self) -> None:
async def _setup_user_id_seq(self):
curr_id = await self.sqlite_store.db_pool.runInteraction(
"setup_user_id_seq", find_max_generated_user_id_localpart
)
@@ -885,9 +877,9 @@ class Porter(object):
next_id = curr_id + 1
txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,))
await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)
return self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)
async def _setup_events_stream_seqs(self) -> None:
async def _setup_events_stream_seqs(self):
"""Set the event stream sequences to the correct values.
"""
@@ -916,46 +908,35 @@ class Porter(object):
(curr_backward_id + 1,),
)
await self.postgres_store.db_pool.runInteraction(
return await self.postgres_store.db_pool.runInteraction(
"_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
)
async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None:
"""Set a sequence to the correct value.
async def _setup_device_inbox_seq(self):
"""Set the device inbox sequence to the correct value.
"""
current_stream_ids = []
for stream_id_table in stream_id_tables:
max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
)
current_stream_ids.append(max_stream_id)
next_id = max(current_stream_ids) + 1
def r(txn):
sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, )
txn.execute(sql + " %s", (next_id, ))
await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r)
async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True
curr_local_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="device_inbox",
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
)
curr_federation_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="device_federation_outbox",
keyvalues={},
retcol="COALESCE(MAX(stream_id), 1)",
allow_none=True,
)
next_id = max(curr_local_id, curr_federation_id) + 1
def r(txn):
txn.execute(
"ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
(curr_chain_id,),
"ALTER SEQUENCE device_inbox_sequence RESTART WITH %s", (next_id,)
)
await self.postgres_store.db_pool.runInteraction(
"_setup_event_auth_chain_id", r,
)
return self.postgres_store.db_pool.runInteraction("_setup_device_inbox_seq", r)
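Both helpers follow the same pattern: find the highest id already ported, then restart the Postgres sequence just above it. Schematically, with a generic DB-API cursor (a sketch; table and sequence names are whatever the caller passes):

```python
def restart_sequence(txn, sequence_name: str, tables: list) -> None:
    """Restart `sequence_name` above the largest stream_id in `tables`."""
    current_max = 1
    for table in tables:
        txn.execute(f"SELECT COALESCE(MAX(stream_id), 1) FROM {table}")
        (table_max,) = txn.fetchone()
        current_max = max(current_max, table_max)

    # The sequence name cannot be a bind parameter, so it is interpolated;
    # the restart value still goes through normal parameter binding.
    txn.execute(
        f"ALTER SEQUENCE {sequence_name} RESTART WITH %s", (current_max + 1,)
    )
```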
##############################################

View File

@@ -3,7 +3,6 @@ test_suite = tests
[check-manifest]
ignore =
.git-blame-ignore-revs
contrib
contrib/*
docs/*

View File

@@ -102,7 +102,7 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
"flake8",
]
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.11"]
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"]
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.

View File

@@ -17,9 +17,7 @@
"""
from typing import Any, List, Optional, Type, Union
from twisted.internet import protocol
class RedisProtocol(protocol.Protocol):
class RedisProtocol:
def publish(self, channel: str, message: bytes): ...
async def ping(self) -> None: ...
async def set(
@@ -54,7 +52,7 @@ def lazyConnection(
class ConnectionHandler: ...
class RedisFactory(protocol.ReconnectingClientFactory):
class RedisFactory:
continueTrying: bool
handler: RedisProtocol
pool: List[RedisProtocol]

View File

@@ -48,7 +48,7 @@ try:
except ImportError:
pass
__version__ = "1.30.0"
__version__ = "1.27.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

View File

@@ -39,7 +39,6 @@ from synapse.logging import opentracing as opentracing
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import StateMap, UserID
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
@@ -164,7 +163,7 @@ class Auth:
async def get_user_by_req(
self,
request: SynapseRequest,
request: Request,
allow_guest: bool = False,
rights: str = "access",
allow_expired: bool = False,
@@ -409,7 +408,7 @@ class Auth:
raise _InvalidMacaroonException()
try:
user_id = get_value_from_macaroon(macaroon, "user_id")
user_id = self.get_user_id_from_macaroon(macaroon)
guest = False
for caveat in macaroon.caveats:
@@ -417,12 +416,7 @@ class Auth:
guest = True
self.validate_macaroon(macaroon, rights, user_id=user_id)
except (
pymacaroons.exceptions.MacaroonException,
KeyError,
TypeError,
ValueError,
):
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
raise InvalidClientTokenError("Invalid macaroon passed.")
if rights == "access":
@@ -430,6 +424,27 @@ class Auth:
return user_id, guest
def get_user_id_from_macaroon(self, macaroon):
"""Retrieve the user_id given by the caveats on the macaroon.
Does *not* validate the macaroon.
Args:
macaroon (pymacaroons.Macaroon): The macaroon to validate
Returns:
(str) user id
Raises:
InvalidClientCredentialsError if there is no user_id caveat in the
macaroon
"""
user_prefix = "user_id = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(user_prefix):
return caveat.caveat_id[len(user_prefix) :]
raise InvalidClientTokenError("No user caveat in macaroon")
def validate_macaroon(self, macaroon, type_string, user_id):
"""
validate that a Macaroon is understood by and was signed by this server.
@@ -450,13 +465,21 @@ class Auth:
v.satisfy_exact("type = " + type_string)
v.satisfy_exact("user_id = %s" % user_id)
v.satisfy_exact("guest = true")
satisfy_expiry(v, self.clock.time_msec)
v.satisfy_general(self._verify_expiry)
# access_tokens include a nonce for uniqueness: any value is acceptable
v.satisfy_general(lambda c: c.startswith("nonce = "))
v.verify(macaroon, self._macaroon_secret_key)
def _verify_expiry(self, caveat):
prefix = "time < "
if not caveat.startswith(prefix):
return False
expiry = int(caveat[len(prefix) :])
now = self.hs.get_clock().time_msec()
return now < expiry
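For reference, here is a self-contained sketch of the same caveat scheme using `pymacaroons` (the key, user id and expiry are made up; real Synapse tokens carry further caveats, such as the nonce checked above):

```python
import time
import pymacaroons

SECRET_KEY = "not-a-real-key"  # made up for illustration

# Build a token shaped like the caveats validated above.
m = pymacaroons.Macaroon(location="example.com", identifier="key1", key=SECRET_KEY)
m.add_first_party_caveat("type = access")
m.add_first_party_caveat("user_id = @alice:example.com")
m.add_first_party_caveat("time < %d" % (int(time.time() * 1000) + 60000,))

# Verify it the same way validate_macaroon does.
v = pymacaroons.Verifier()
v.satisfy_exact("type = access")
v.satisfy_exact("user_id = @alice:example.com")
v.satisfy_general(lambda c: c.startswith("time < "))  # expiry check elided
v.verify(m, SECRET_KEY)  # raises if any caveat is unsatisfied
print("macaroon ok")
```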
def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService:
token = self.get_access_token_from_request(request)
service = self.store.get_app_service_by_token(token)

View File

@@ -27,11 +27,6 @@ MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255
# The maximum length for a group id is 255 characters
MAX_GROUPID_LENGTH = 255
MAX_GROUP_CATEGORYID_LENGTH = 255
MAX_GROUP_ROLEID_LENGTH = 255
class Membership:
@@ -98,12 +93,9 @@ class EventTypes:
Retention = "m.room.retention"
Dummy = "org.matrix.dummy_event"
class EduTypes:
Presence = "m.presence"
RoomKeyRequest = "m.room_key_request"
Dummy = "org.matrix.dummy_event"
class RejectedReason:

View File

@@ -14,7 +14,7 @@
# limitations under the License.
from collections import OrderedDict
from typing import Hashable, Optional, Tuple
from typing import Any, Optional, Tuple
from synapse.api.errors import LimitExceededError
from synapse.types import Requester
@@ -42,9 +42,7 @@ class Ratelimiter:
# * How many times an action has occurred since a point in time
# * The point in time
# * The rate_hz of this particular entry. This can vary per request
self.actions = (
OrderedDict()
) # type: OrderedDict[Hashable, Tuple[float, int, float]]
self.actions = OrderedDict() # type: OrderedDict[Any, Tuple[float, int, float]]
def can_requester_do_action(
self,
@@ -84,7 +82,7 @@ class Ratelimiter:
def can_do_action(
self,
key: Hashable,
key: Any,
rate_hz: Optional[float] = None,
burst_count: Optional[int] = None,
update: bool = True,
@@ -177,7 +175,7 @@ class Ratelimiter:
def ratelimit(
self,
key: Hashable,
key: Any,
rate_hz: Optional[float] = None,
burst_count: Optional[int] = None,
update: bool = True,
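The `Hashable` bound matters because keys index the `OrderedDict` above directly: a user ID string, a tuple such as `(appservice_id, user_id)`, or any other hashable value names a bucket. A toy version of the same leaky-bucket bookkeeping (a sketch, not this class):

```python
import time
from typing import Dict, Hashable, Tuple

class ToyRatelimiter:
    def __init__(self, rate_hz: float, burst_count: int):
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        # key -> (actions counted so far, time of last update)
        self.actions: Dict[Hashable, Tuple[float, float]] = {}

    def can_do_action(self, key: Hashable) -> bool:
        now = time.monotonic()
        count, since = self.actions.get(key, (0.0, now))
        # Leak: forgive actions at rate_hz over the elapsed interval.
        count = max(0.0, count - (now - since) * self.rate_hz)
        if count >= self.burst_count:
            self.actions[key] = (count, now)
            return False
        self.actions[key] = (count + 1, now)
        return True

limiter = ToyRatelimiter(rate_hz=0.17, burst_count=3)
print([limiter.can_do_action("@alice:example.com") for _ in range(5)])
# -> [True, True, True, False, False]
```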

View File

@@ -17,6 +17,8 @@ import sys
from synapse import python_dependencies # noqa: E402
sys.dont_write_bytecode = True
logger = logging.getLogger(__name__)
try:

View File

@@ -210,9 +210,7 @@ def start(config_options):
config.update_user_directory = False
config.run_background_tasks = False
config.start_pushers = False
config.pusher_shard_config.instances = []
config.send_federation = False
config.federation_shard_config.instances = []
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

View File

@@ -23,7 +23,6 @@ from typing_extensions import ContextManager
from twisted.internet import address
from twisted.web.resource import IResource
from twisted.web.server import Request
import synapse
import synapse.events
@@ -191,7 +190,7 @@ class KeyUploadServlet(RestServlet):
self.http_client = hs.get_simple_http_client()
self.main_uri = hs.config.worker_main_http_uri
async def on_POST(self, request: Request, device_id: Optional[str]):
async def on_POST(self, request, device_id):
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
@@ -224,12 +223,10 @@ class KeyUploadServlet(RestServlet):
header: request.requestHeaders.getRawHeaders(header, [])
for header in (b"Authorization", b"User-Agent")
}
# Add the previous hop to the X-Forwarded-For header.
x_forwarded_for = request.requestHeaders.getRawHeaders(
b"X-Forwarded-For", []
)
# we use request.client here, since we want the previous hop, not the
# original client (as returned by request.getClientAddress()).
if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
previous_host = request.client.host.encode("ascii")
# If the header exists, add to the comma-separated list of the first
@@ -242,14 +239,6 @@ class KeyUploadServlet(RestServlet):
x_forwarded_for = [previous_host]
headers[b"X-Forwarded-For"] = x_forwarded_for
# Replicate the original X-Forwarded-Proto header. Note that
# XForwardedForRequest overrides isSecure() to give us the original protocol
# used by the client, as opposed to the protocol used by our upstream proxy
# - which is what we want here.
headers[b"X-Forwarded-Proto"] = [
b"https" if request.isSecure() else b"http"
]
try:
result = await self.http_client.post_json_get_json(
self.main_uri + request.uri.decode("ascii"), body, headers=headers
@@ -656,6 +645,9 @@ class GenericWorkerServer(HomeServer):
self.get_tcp_replication().start_replication(self)
async def remove_pusher(self, app_id, push_key, user_id):
self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
@cache_in_self
def get_replication_data_handler(self):
return GenericWorkerReplicationHandler(self)
@@ -930,6 +922,22 @@ def start(config_options):
# For other worker types we force this to off.
config.appservice.notify_appservices = False
if config.worker_app == "synapse.app.pusher":
if config.server.start_pushers:
sys.stderr.write(
"\nThe pushers must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``start_pushers: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the pushers to start since they will be disabled in the main config
config.server.start_pushers = True
else:
# For other worker types we force this to off.
config.server.start_pushers = False
if config.worker_app == "synapse.app.user_dir":
if config.server.update_user_directory:
sys.stderr.write(
@@ -946,6 +954,22 @@ def start(config_options):
# For other worker types we force this to off.
config.server.update_user_directory = False
if config.worker_app == "synapse.app.federation_sender":
if config.worker.send_federation:
sys.stderr.write(
"\nThe send_federation must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``send_federation: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the federation senders to start since they will be disabled in the main config
config.worker.send_federation = True
else:
# For other worker types we force this to off.
config.worker.send_federation = False
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
hs = GenericWorkerServer(

View File

@@ -90,7 +90,7 @@ class ApplicationServiceApi(SimpleHttpClient):
self.clock = hs.get_clock()
self.protocol_meta_cache = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
) # type: ResponseCache[Tuple[str, str]]
async def query_user(self, service, user_id):

View File

@@ -21,7 +21,7 @@ import os
from collections import OrderedDict
from hashlib import sha256
from textwrap import dedent
from typing import Any, Iterable, List, MutableMapping, Optional, Union
from typing import Any, Iterable, List, MutableMapping, Optional
import attr
import jinja2
@@ -147,20 +147,7 @@ class Config:
return int(value) * size
@staticmethod
def parse_duration(value: Union[str, int]) -> int:
"""Convert a duration as a string or integer to a number of milliseconds.
If an integer is provided it is treated as milliseconds and is unchanged.
String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'.
No suffix is treated as milliseconds.
Args:
value: The duration to parse.
Returns:
The number of milliseconds in the duration.
"""
def parse_duration(value):
if isinstance(value, int):
return value
second = 1000
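The docstring trimmed here is worth keeping in mind: integers pass through as milliseconds, while strings may end in a unit suffix. Restated as a standalone sketch (the suffix table is inferred from the surrounding code):

```python
def parse_duration_sketch(value) -> int:
    """Return `value` as milliseconds ('15s' -> 15000; ints unchanged)."""
    if isinstance(value, int):
        return value
    second = 1000
    sizes = {
        "s": second,
        "m": 60 * second,
        "h": 60 * 60 * second,
        "d": 24 * 60 * 60 * second,
        "w": 7 * 24 * 60 * 60 * second,
        "y": 365 * 24 * 60 * 60 * second,
    }
    if value[-1] in sizes:
        return int(value[:-1]) * sizes[value[-1]]
    return int(value)  # a bare number is already milliseconds

assert parse_duration_sketch(15000) == 15000
assert parse_duration_sketch("15s") == 15000
assert parse_duration_sketch("1d") == 86400000
```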
@@ -212,8 +199,9 @@ class Config:
@classmethod
def read_file(cls, file_path, config_name):
"""Deprecated: call read_file directly"""
return read_file(file_path, (config_name,))
cls.check_file(file_path, config_name)
with open(file_path) as file_stream:
return file_stream.read()
def read_template(self, filename: str) -> jinja2.Template:
"""Load a template file from disk.
@@ -843,23 +831,22 @@ class ShardedWorkerHandlingConfig:
def should_handle(self, instance_name: str, key: str) -> bool:
"""Whether this instance is responsible for handling the given key."""
# If no instances are defined we assume some other worker is handling
# this.
if not self.instances:
return False
# If multiple instances are not defined we always return true
if not self.instances or len(self.instances) == 1:
return True
return self._get_instance(key) == instance_name
return self.get_instance(key) == instance_name
def _get_instance(self, key: str) -> str:
def get_instance(self, key: str) -> str:
"""Get the instance responsible for handling the given key.
Note: For federation sending and pushers the config for which instance
is sending is known only to the sender instance, so we don't expose this
method by default.
Note: For things like federation sending the config for which instance
is sending is known only to the sender instance if there is only one.
Therefore `should_handle` should be used where possible.
"""
if not self.instances:
raise Exception("Unknown worker")
return "master"
if len(self.instances) == 1:
return self.instances[0]
@@ -876,52 +863,4 @@ class ShardedWorkerHandlingConfig:
return self.instances[remainder]
@attr.s
class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
"""A version of `ShardedWorkerHandlingConfig` that is used for config
options where all instances know which instances are responsible for the
sharded work.
"""
def __attrs_post_init__(self):
# We require that `self.instances` is non-empty.
if not self.instances:
raise Exception("Got empty list of instances for shard config")
def get_instance(self, key: str) -> str:
"""Get the instance responsible for handling the given key."""
return self._get_instance(key)
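The instance chosen for a key is deterministic, so every process that evaluates it agrees: hash the key and take the remainder modulo the number of instances. A sketch of that idea (the real code's hash construction may differ):

```python
import hashlib
from typing import List

def pick_instance(key: str, instances: List[str]) -> str:
    # Derive a stable integer from the key, then map it onto the list.
    digest = hashlib.sha256(key.encode("utf8")).digest()
    remainder = int.from_bytes(digest, byteorder="little") % len(instances)
    return instances[remainder]

senders = ["sender1", "sender2", "sender3"]
# The same destination always lands on the same sender.
assert pick_instance("example.com", senders) == pick_instance("example.com", senders)
```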
def read_file(file_path: Any, config_path: Iterable[str]) -> str:
"""Check the given file exists, and read it into a string
If it does not, emit an error indicating the problem
Args:
file_path: the file to be read
config_path: where in the configuration file_path came from, so that a useful
error can be emitted if it does not exist.
Returns:
content of the file.
Raises:
ConfigError if there is a problem reading the file.
"""
if not isinstance(file_path, str):
raise ConfigError("%r is not a string", config_path)
try:
os.stat(file_path)
with open(file_path) as file_stream:
return file_stream.read()
except OSError as e:
raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e
__all__ = [
"Config",
"RootConfig",
"ShardedWorkerHandlingConfig",
"RoutableShardedWorkerHandlingConfig",
"read_file",
]
__all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]

View File

@@ -149,8 +149,4 @@ class ShardedWorkerHandlingConfig:
instances: List[str]
def __init__(self, instances: List[str]) -> None: ...
def should_handle(self, instance_name: str, key: str) -> bool: ...
class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
def get_instance(self, key: str) -> str: ...
def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...

View File

@@ -37,9 +37,7 @@ class AuthConfig(Config):
# User-interactive authentication
ui_auth = config.get("ui_auth") or {}
self.ui_auth_session_timeout = self.parse_duration(
ui_auth.get("session_timeout", 0)
)
self.ui_auth_session_timeout = ui_auth.get("session_timeout", 0)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
@@ -95,8 +93,8 @@ class AuthConfig(Config):
#require_uppercase: true
ui_auth:
# The amount of time to allow a user-interactive authentication session
# to be active.
# The number of milliseconds to allow a user-interactive authentication
# session to be active.
#
# This defaults to 0, meaning the user is queried for their credentials
# before every action, but this can be overridden to allow a single
@@ -107,5 +105,5 @@ class AuthConfig(Config):
# Uncomment below to allow for credential validation to last for 15
# seconds.
#
#session_timeout: "15s"
#session_timeout: 15000
"""

View File

@@ -41,10 +41,6 @@ class FederationConfig(Config):
)
self.federation_metrics_domains = set(federation_metrics_domains)
self.allow_profile_lookup_over_federation = config.get(
"allow_profile_lookup_over_federation", True
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
## Federation ##
@@ -70,12 +66,6 @@ class FederationConfig(Config):
#federation_metrics_domains:
# - matrix.org
# - example.com
# Uncomment to disable profile lookup over federation. By default, the
# Federation API allows other homeservers to obtain profile data of any user
# on this homeserver. Defaults to 'true'.
#
#allow_profile_lookup_over_federation: false
"""

View File

@@ -21,10 +21,8 @@ import threading
from string import Template
import yaml
from zope.interface import implementer
from twisted.logger import (
ILogObserver,
LogBeginner,
STDLibLogObserver,
eventAsText,
@@ -229,8 +227,7 @@ def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) ->
threadlocal = threading.local()
@implementer(ILogObserver)
def _log(event: dict) -> None:
def _log(event):
if "log_text" in event:
if event["log_text"].startswith("DNSDatagramProtocol starting on "):
return

View File

@@ -15,7 +15,7 @@
# limitations under the License.
from collections import Counter
from typing import Iterable, Mapping, Optional, Tuple, Type
from typing import Iterable, Optional, Tuple, Type
import attr
@@ -25,7 +25,7 @@ from synapse.types import Collection, JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri
from ._base import Config, ConfigError, read_file
from ._base import Config, ConfigError
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"
@@ -97,26 +97,7 @@ class OIDCConfig(Config):
#
# client_id: Required. oauth2 client id to use.
#
# client_secret: oauth2 client secret to use. May be omitted if
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
# client_secret_jwt_key: Alternative to client_secret: details of a key used
# to create a JSON Web Token to be used as an OAuth2 client secret. If
# given, must be a dictionary with the following properties:
#
# key: a pem-encoded signing key. Must be a suitable key for the
# algorithm specified. Required unless 'key_file' is given.
#
# key_file: the path to a file containing a pem-encoded signing key.
# Required unless 'key' is given.
#
# jwt_header: a dictionary giving properties to include in the JWT
# header. Must include the key 'alg', giving the algorithm used to
# sign the JWT, such as "ES256", using the JWA identifiers in
# RFC7518.
#
# jwt_payload: an optional dictionary giving properties to include in
# the JWT payload. Normally this should include an 'iss' key.
# client_secret: Required. oauth2 client secret to use.
#
# client_auth_method: auth method to use when exchanging the token. Valid
# values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -237,7 +218,7 @@ class OIDCConfig(Config):
#
#- idp_id: github
# idp_name: Github
# idp_brand: github
# idp_brand: org.matrix.github
# discover: false
# issuer: "https://github.com/"
# client_id: "your-client-id" # TO BE FILLED
@@ -259,7 +240,7 @@ class OIDCConfig(Config):
# jsonschema definition of the configuration settings for an oidc identity provider
OIDC_PROVIDER_CONFIG_SCHEMA = {
"type": "object",
"required": ["issuer", "client_id"],
"required": ["issuer", "client_id", "client_secret"],
"properties": {
"idp_id": {
"type": "string",
@@ -272,12 +253,7 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"idp_icon": {"type": "string"},
"idp_brand": {
"type": "string",
"minLength": 1,
"maxLength": 255,
"pattern": "^[a-z][a-z0-9_.-]*$",
},
"idp_unstable_brand": {
"type": "string",
# MSC2758-style namespaced identifier
"minLength": 1,
"maxLength": 255,
"pattern": "^[a-z][a-z0-9_.-]*$",
@@ -286,30 +262,6 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"issuer": {"type": "string"},
"client_id": {"type": "string"},
"client_secret": {"type": "string"},
"client_secret_jwt_key": {
"type": "object",
"required": ["jwt_header"],
"oneOf": [
{"required": ["key"]},
{"required": ["key_file"]},
],
"properties": {
"key": {"type": "string"},
"key_file": {"type": "string"},
"jwt_header": {
"type": "object",
"required": ["alg"],
"properties": {
"alg": {"type": "string"},
},
"additionalProperties": {"type": "string"},
},
"jwt_payload": {
"type": "object",
"additionalProperties": {"type": "string"},
},
},
},
"client_auth_method": {
"type": "string",
# the following list is the same as the keys of
@@ -452,31 +404,15 @@ def _parse_oidc_config_dict(
"idp_icon must be a valid MXC URI", config_path + ("idp_icon",)
) from e
client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
client_secret_jwt_key = None # type: Optional[OidcProviderClientSecretJwtKey]
if client_secret_jwt_key_config is not None:
keyfile = client_secret_jwt_key_config.get("key_file")
if keyfile:
key = read_file(keyfile, config_path + ("client_secret_jwt_key",))
else:
key = client_secret_jwt_key_config["key"]
client_secret_jwt_key = OidcProviderClientSecretJwtKey(
key=key,
jwt_header=client_secret_jwt_key_config["jwt_header"],
jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
)
return OidcProviderConfig(
idp_id=idp_id,
idp_name=oidc_config.get("idp_name", "OIDC"),
idp_icon=idp_icon,
idp_brand=oidc_config.get("idp_brand"),
unstable_idp_brand=oidc_config.get("unstable_idp_brand"),
discover=oidc_config.get("discover", True),
issuer=oidc_config["issuer"],
client_id=oidc_config["client_id"],
client_secret=oidc_config.get("client_secret"),
client_secret_jwt_key=client_secret_jwt_key,
client_secret=oidc_config["client_secret"],
client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
scopes=oidc_config.get("scopes", ["openid"]),
authorization_endpoint=oidc_config.get("authorization_endpoint"),
@@ -491,18 +427,6 @@ def _parse_oidc_config_dict(
)
@attr.s(slots=True, frozen=True)
class OidcProviderClientSecretJwtKey:
# a pem-encoded signing key
key = attr.ib(type=str)
# properties to include in the JWT header
jwt_header = attr.ib(type=Mapping[str, str])
# properties to include in the JWT payload.
jwt_payload = attr.ib(type=Mapping[str, str])
@attr.s(slots=True, frozen=True)
class OidcProviderConfig:
# a unique identifier for this identity provider. Used in the 'user_external_ids'
@@ -518,9 +442,6 @@ class OidcProviderConfig:
# Optional brand identifier for this IdP.
idp_brand = attr.ib(type=Optional[str])
# Optional brand identifier for the unstable API (see MSC2858).
unstable_idp_brand = attr.ib(type=Optional[str])
# whether the OIDC discovery mechanism is used to discover endpoints
discover = attr.ib(type=bool)
@@ -531,13 +452,8 @@ class OidcProviderConfig:
# oauth2 client id to use
client_id = attr.ib(type=str)
# oauth2 client secret to use. If `None`, use client_secret_jwt_key to generate
# a secret.
client_secret = attr.ib(type=Optional[str])
# key to use to construct a JWT to use as a client secret. May be `None` if
# `client_secret` is set.
client_secret_jwt_key = attr.ib(type=Optional[OidcProviderClientSecretJwtKey])
# oauth2 client secret to use
client_secret = attr.ib(type=str)
# auth method to use when exchanging the token.
# Valid values are 'client_secret_basic', 'client_secret_post' and

View File

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from ._base import Config, ShardedWorkerHandlingConfig
class PushConfig(Config):
@@ -27,6 +27,9 @@ class PushConfig(Config):
"group_unread_count_by_room", True
)
pusher_instances = config.get("pusher_instances") or []
self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
# There was a 'redact_content' setting, but it was mistakenly read from the
# 'email' section. Check for the flag in the 'push' section, and log,
# but do not honour it to avoid nasty surprises when people upgrade.

View File

@@ -102,16 +102,6 @@ class RatelimitConfig(Config):
defaults={"per_second": 0.01, "burst_count": 3},
)
# Ratelimit cross-user key requests:
# * For local requests this is keyed by the sending device.
# * For requests received over federation this is keyed by the origin.
#
# Note that this isn't exposed in the configuration as it is obscure.
self.rc_key_requests = RateLimitConfig(
config.get("rc_key_requests", {}),
defaults={"per_second": 20, "burst_count": 100},
)
self.rc_3pid_validation = RateLimitConfig(
config.get("rc_3pid_validation") or {},
defaults={"per_second": 0.003, "burst_count": 5},

View File

@@ -206,6 +206,7 @@ class ContentRepositoryConfig(Config):
def generate_config_section(self, data_dir_path, **kwargs):
media_store = os.path.join(data_dir_path, "media_store")
uploads_path = os.path.join(data_dir_path, "uploads")
formatted_thumbnail_sizes = "".join(
THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES

View File

@@ -263,12 +263,6 @@ class ServerConfig(Config):
False,
)
# Whether to retrieve and display profile data for a user when they
# are invited to a room
self.include_profile_data_on_invite = config.get(
"include_profile_data_on_invite", True
)
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
@@ -397,6 +391,7 @@ class ServerConfig(Config):
if self.public_baseurl is not None:
if self.public_baseurl[-1] != "/":
self.public_baseurl += "/"
self.start_pushers = config.get("start_pushers", True)
# (undocumented) option for torturing the worker-mode replication a bit,
# for testing. The value defines the number of milliseconds to pause before
@@ -841,7 +836,8 @@ class ServerConfig(Config):
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, unless allow_profile_lookup_over_federation is set to false.
# API, so this setting is of limited value if federation is enabled on
# the server.
#
#require_auth_for_profile_requests: true
@@ -852,14 +848,6 @@ class ServerConfig(Config):
#
#limit_profile_requests_to_users_who_share_rooms: true
# Uncomment to prevent a user's profile data from being retrieved and
# displayed in a room until they have joined it. By default, a user's
# profile data is included in an invite event, regardless of the values
# of the above two settings, and whether or not the users share a server.
# Defaults to 'true'.
#
#include_profile_data_on_invite: false
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.

View File

@@ -13,22 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from ._base import Config
ROOM_STATS_DISABLED_WARN = """\
WARNING: room/user statistics have been disabled via the stats.enabled
configuration setting. This means that certain features (such as the room
directory) will not operate correctly. Future versions of Synapse may ignore
this setting.
To fix this warning, remove the stats.enabled setting from your configuration
file.
--------------------------------------------------------------------------------"""
logger = logging.getLogger(__name__)
class StatsConfig(Config):
"""Stats Configuration
@@ -40,29 +28,30 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
self.stats_bucket_size = 86400 * 1000
self.stats_retention = sys.maxsize
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
self.stats_bucket_size = self.parse_duration(
stats_config.get("bucket_size", "1d")
)
if not self.stats_enabled:
logger.warning(ROOM_STATS_DISABLED_WARN)
self.stats_retention = self.parse_duration(
stats_config.get("retention", "%ds" % (sys.maxsize,))
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
# Settings for local room and user statistics collection. See
# docs/room_and_user_statistics.md.
# Local statistics collection. Used in populating the room directory.
#
stats:
# Uncomment the following to disable room and user statistics. Note that doing
# so may cause certain features (such as the room directory) not to work
# correctly.
#
#enabled: false
# The size of each timeslice in the room_stats_historical and
# user_stats_historical tables, as a time period. Defaults to "1d".
#
#bucket_size: 1h
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#
#stats:
# enabled: true
# bucket_size: 1d
# retention: 1y
"""

View File

@@ -24,46 +24,32 @@ class UserDirectoryConfig(Config):
section = "userdirectory"
def read_config(self, config, **kwargs):
user_directory_config = config.get("user_directory") or {}
self.user_directory_search_enabled = user_directory_config.get("enabled", True)
self.user_directory_search_all_users = user_directory_config.get(
"search_all_users", False
)
self.user_directory_search_prefer_local_users = user_directory_config.get(
"prefer_local_users", False
)
self.user_directory_search_enabled = True
self.user_directory_search_all_users = False
user_directory_config = config.get("user_directory", None)
if user_directory_config:
self.user_directory_search_enabled = user_directory_config.get(
"enabled", True
)
self.user_directory_search_all_users = user_directory_config.get(
"search_all_users", False
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
# User Directory configuration
#
user_directory:
# Defines whether users can search the user directory. If false then
# empty responses are returned to all queries. Defaults to true.
#
# Uncomment to disable the user directory.
#
#enabled: false
# Defines whether to search all users visible to your HS when searching
# the user directory, rather than limiting to users visible in public
# rooms. Defaults to false.
#
# If you set it true, you'll have to rebuild the user_directory search
# indexes, see:
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
# Uncomment to return search results containing all known users, even if that
# user does not share a room with the requester.
#
#search_all_users: true
# Defines whether to prefer local users in search query results.
# If True, local users are more likely to appear above remote users
# when searching the user directory. Defaults to false.
#
# Uncomment to prefer local over remote users in user directory search
# results.
#
#prefer_local_users: true
# 'enabled' defines whether users can search the user directory. If
# false then empty responses are returned to all queries. Defaults to
# true.
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
#user_directory:
# enabled: true
# search_all_users: false
"""

View File

@@ -17,28 +17,9 @@ from typing import List, Union
import attr
from ._base import (
Config,
ConfigError,
RoutableShardedWorkerHandlingConfig,
ShardedWorkerHandlingConfig,
)
from ._base import Config, ConfigError, ShardedWorkerHandlingConfig
from .server import ListenerConfig, parse_listener_def
_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main
synapse process before they can be run in a separate worker.
Please add ``send_federation: false`` to the main config
"""
_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
The start_pushers config option must be disabled in the main
synapse process before they can be run in a separate worker.
Please add ``start_pushers: false`` to the main config
"""
def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
"""Helper for allowing parsing a string or list of strings to a config
@@ -122,7 +103,6 @@ class WorkerConfig(Config):
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or "master"
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
@@ -138,41 +118,12 @@ class WorkerConfig(Config):
)
)
# Handle federation sender configuration.
#
# There are two ways of configuring which instances handle federation
# sending:
# 1. The old way where "send_federation" is set to false and running a
# `synapse.app.federation_sender` worker app.
# 2. Specifying the workers sending federation in
# `federation_sender_instances`.
#
# Whether to send federation traffic out in this process. This only
# applies to some federation traffic, and so shouldn't be used to
# "disable" federation
self.send_federation = config.get("send_federation", True)
send_federation = config.get("send_federation", True)
federation_sender_instances = config.get("federation_sender_instances")
if federation_sender_instances is None:
# Default to an empty list, which means "another, unknown, worker is
# responsible for it".
federation_sender_instances = []
# If no federation sender instances are set we check if
# `send_federation` is set, which means use master
if send_federation:
federation_sender_instances = ["master"]
if self.worker_app == "synapse.app.federation_sender":
if send_federation:
# If we're running federation senders, and not using
# `federation_sender_instances`, then we should have
# explicitly set `send_federation` to false.
raise ConfigError(
_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
)
federation_sender_instances = [self.worker_name]
self.send_federation = self.instance_name in federation_sender_instances
federation_sender_instances = config.get("federation_sender_instances") or []
self.federation_shard_config = ShardedWorkerHandlingConfig(
federation_sender_instances
)
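As a hedged illustration of the two configuration styles described in the comment above (the instance names are made up and the worker plumbing is elided):

# Style 1: the main process disables sending and a dedicated
# synapse.app.federation_sender worker does the sending.
main_config = {"send_federation": False}

# Style 2: name the sending workers explicitly.
sharded_config = {"federation_sender_instances": ["sender1", "sender2"]}

# Each instance then decides whether it sends by checking membership:
instance_name = "sender1"
federation_sender_instances = (
    sharded_config.get("federation_sender_instances") or []
)
send_federation = instance_name in federation_sender_instances
assert send_federation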
@@ -213,37 +164,7 @@ class WorkerConfig(Config):
"Must only specify one instance to handle `receipts` messages."
)
if len(self.writers.events) == 0:
raise ConfigError("Must specify at least one instance to handle `events`.")
self.events_shard_config = RoutableShardedWorkerHandlingConfig(
self.writers.events
)
# Handle sharded push
start_pushers = config.get("start_pushers", True)
pusher_instances = config.get("pusher_instances")
if pusher_instances is None:
# Default to an empty list, which means "another, unknown, worker is
# responsible for it".
pusher_instances = []
# If no pushers instances are set we check if `start_pushers` is
# set, which means use master
if start_pushers:
pusher_instances = ["master"]
if self.worker_app == "synapse.app.pusher":
if start_pushers:
# If we're running pushers, and not using
# `pusher_instances`, then we should have explicitly set
# `start_pushers` to false.
raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)
pusher_instances = [self.instance_name]
self.start_pushers = self.instance_name in pusher_instances
self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events)
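The pusher branch above follows the same membership pattern; a minimal sketch under the same assumptions:

pusher_config = {"start_pushers": False, "pusher_instances": ["pusher1"]}
instance_name = "pusher1"
start_pushers = instance_name in (pusher_config.get("pusher_instances") or [])
assert start_pushers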
# Whether this worker should run background tasks or not.
#

View File

@@ -15,7 +15,6 @@
# limitations under the License.
import inspect
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from synapse.rest.media.v1._base import FileInfo
@@ -28,8 +27,6 @@ if TYPE_CHECKING:
import synapse.events
import synapse.server
logger = logging.getLogger(__name__)
class SpamChecker:
def __init__(self, hs: "synapse.server.HomeServer"):
@@ -193,7 +190,6 @@ class SpamChecker:
email_threepid: Optional[dict],
username: Optional[str],
request_info: Collection[Tuple[str, str]],
auth_provider_id: Optional[str] = None,
) -> RegistrationBehaviour:
"""Checks if we should allow the given registration request.
@@ -202,9 +198,6 @@ class SpamChecker:
username: The request user name, if any
request_info: List of tuples of user agent and IP that
were used during the registration process.
auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml",
"cas". If any. Note this does not include users registered
via a password provider.
Returns:
Enum for how the request should be handled
@@ -215,25 +208,9 @@ class SpamChecker:
# spam checker
checker = getattr(spam_checker, "check_registration_for_spam", None)
if checker:
# Provide auth_provider_id if the function supports it
checker_args = inspect.signature(checker)
if len(checker_args.parameters) == 4:
d = checker(
email_threepid,
username,
request_info,
auth_provider_id,
)
elif len(checker_args.parameters) == 3:
d = checker(email_threepid, username, request_info)
else:
logger.error(
"Invalid signature for %s.check_registration_for_spam. Denying registration",
spam_checker.__module__,
)
return RegistrationBehaviour.DENY
behaviour = await maybe_awaitable(d)
behaviour = await maybe_awaitable(
checker(email_threepid, username, request_info)
)
assert isinstance(behaviour, RegistrationBehaviour)
if behaviour != RegistrationBehaviour.ALLOW:
return behaviour
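As a self-contained illustration of the signature probing above, two hypothetical third-party checkers (these stand in for a spam-checker module and are not part of Synapse):

import inspect

# A hypothetical legacy checker that predates auth_provider_id:
def legacy_checker(email_threepid, username, request_info):
    return "allow"

# A hypothetical updated checker that accepts the extra argument:
def new_checker(email_threepid, username, request_info, auth_provider_id):
    return "allow"

for checker in (legacy_checker, new_checker):
    params = inspect.signature(checker).parameters
    if len(params) == 4:
        result = checker(None, "alice", [], "oidc")
    elif len(params) == 3:
        result = checker(None, "alice", [])
    assert result == "allow"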

View File

@@ -22,7 +22,6 @@ from typing import (
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
@@ -35,7 +34,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
@@ -45,7 +44,6 @@ from synapse.api.errors import (
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
@@ -91,15 +89,16 @@ pdu_process_time = Histogram(
"Time taken to process an event",
)
last_pdu_ts_metric = Gauge(
"synapse_federation_last_received_pdu_time",
"The timestamp of the last PDU which was successfully received from the given domain",
last_pdu_age_metric = Gauge(
"synapse_federation_last_received_pdu_age",
"The age (in seconds) of the last PDU successfully received from the given domain",
labelnames=("server_name",),
)
class FederationServer(FederationBase):
def __init__(self, hs: "HomeServer"):
def __init__(self, hs):
super().__init__(hs)
self.auth = hs.get_auth()
@@ -112,15 +111,14 @@ class FederationServer(FederationBase):
# with FederationHandlerRegistry.
hs.get_directory_handler()
self._server_linearizer = Linearizer("fed_server")
self._federation_ratelimiter = hs.get_federation_ratelimiter()
# origins that we are currently processing a transaction from.
# a dict from origin to txn id.
self._active_transactions = {} # type: Dict[str, str]
self._server_linearizer = Linearizer("fed_server")
self._transaction_linearizer = Linearizer("fed_txn_handler")
# We cache results for transaction with the same ID
self._transaction_resp_cache = ResponseCache(
hs.get_clock(), "fed_txn_handler", timeout_ms=30000
hs, "fed_txn_handler", timeout_ms=30000
) # type: ResponseCache[Tuple[str, str]]
self.transaction_actions = TransactionActions(self.store)
@@ -130,10 +128,10 @@ class FederationServer(FederationBase):
# We cache responses to state queries, as they take a while and often
# come in waves.
self._state_resp_cache = ResponseCache(
hs.get_clock(), "state_resp", timeout_ms=30000
hs, "state_resp", timeout_ms=30000
) # type: ResponseCache[Tuple[str, str]]
self._state_ids_resp_cache = ResponseCache(
hs.get_clock(), "state_ids_resp", timeout_ms=30000
hs, "state_ids_resp", timeout_ms=30000
) # type: ResponseCache[Tuple[str, str]]
self._federation_metrics_domains = (
@@ -170,33 +168,6 @@ class FederationServer(FederationBase):
logger.debug("[%s] Got transaction", transaction_id)
# Reject malformed transactions early: reject if too many PDUs/EDUs
if len(transaction.pdus) > 50 or ( # type: ignore
hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore
):
logger.info("Transaction PDU or EDU count too large. Returning 400")
return 400, {}
# we only process one transaction from each origin at a time. We need to do
# this check here, rather than in _on_incoming_transaction_inner so that we
# don't cache the rejection in _transaction_resp_cache (so that if the txn
# arrives again later, we can process it).
current_transaction = self._active_transactions.get(origin)
if current_transaction and current_transaction != transaction_id:
logger.warning(
"Received another txn %s from %s while still processing %s",
transaction_id,
origin,
current_transaction,
)
return 429, {
"errcode": Codes.UNKNOWN,
"error": "Too many concurrent transactions",
}
# CRITICAL SECTION: we must now not await until we populate _active_transactions
# in _on_incoming_transaction_inner.
# We wrap in a ResponseCache so that we de-duplicate retried
# transactions.
return await self._transaction_resp_cache.wrap(
@@ -210,18 +181,26 @@ class FederationServer(FederationBase):
async def _on_incoming_transaction_inner(
self, origin: str, transaction: Transaction, request_time: int
) -> Tuple[int, Dict[str, Any]]:
# CRITICAL SECTION: the first thing we must do (before awaiting) is
# add an entry to _active_transactions.
assert origin not in self._active_transactions
self._active_transactions[origin] = transaction.transaction_id # type: ignore
# Use a linearizer to ensure that transactions from a remote are
# processed in order.
with await self._transaction_linearizer.queue(origin):
# We rate limit here *after* we've queued up the incoming requests,
# so that we don't fill up the ratelimiter with blocked requests.
#
# This is important as the ratelimiter allows N concurrent requests
# at a time, and only starts ratelimiting if there are more requests
# than that being processed at a time. If we queued up requests in
# the linearizer/response cache *after* the ratelimiting then those
# queued up requests would count as part of the allowed limit of N
# concurrent requests.
with self._federation_ratelimiter.ratelimit(origin) as d:
await d
try:
result = await self._handle_incoming_transaction(
origin, transaction, request_time
)
return result
finally:
del self._active_transactions[origin]
result = await self._handle_incoming_transaction(
origin, transaction, request_time
)
return result
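A hedged asyncio analogue of the ordering described in the comment above: requests queue behind a per-origin lock first, and only requests that make it past the lock count against the concurrency limiter (both primitives here are simplified stand-ins):

import asyncio

lock = asyncio.Lock()           # stands in for the per-origin Linearizer
limiter = asyncio.Semaphore(3)  # stands in for the federation ratelimiter

async def handle(origin: str, txn: int):
    async with lock:            # queued here; not yet counted by the limiter
        async with limiter:     # counted only while actually processing
            await asyncio.sleep(0)  # placeholder for the transaction handling
            return 200, {}

async def main():
    results = await asyncio.gather(*(handle("example.org", i) for i in range(5)))
    print(results)

asyncio.run(main())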
async def _handle_incoming_transaction(
self, origin: str, transaction: Transaction, request_time: int
@@ -247,6 +226,19 @@ class FederationServer(FederationBase):
logger.debug("[%s] Transaction is new", transaction.transaction_id) # type: ignore
# Reject if PDU count > 50 or EDU count > 100
if len(transaction.pdus) > 50 or ( # type: ignore
hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore
):
logger.info("Transaction PDU or EDU count too large. Returning 400")
response = {}
await self.transaction_actions.set_response(
origin, transaction, 400, response
)
return 400, response
# We process PDUs and EDUs in parallel. This is important as we don't
# want to block things like to-device messages from reaching clients
# behind the potentially expensive handling of PDUs.
@@ -342,48 +334,42 @@ class FederationServer(FederationBase):
# impose a limit to avoid going too crazy with ram/cpu.
async def process_pdus_for_room(room_id: str):
with nested_logging_context(room_id):
logger.debug("Processing PDUs for %s", room_id)
try:
await self.check_server_matches_acl(origin_host, room_id)
except AuthError as e:
logger.warning(
"Ignoring PDUs for room %s from banned server", room_id
)
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
pdu_results[event_id] = e.error_dict()
return
logger.debug("Processing PDUs for %s", room_id)
try:
await self.check_server_matches_acl(origin_host, room_id)
except AuthError as e:
logger.warning("Ignoring PDUs for room %s from banned server", room_id)
for pdu in pdus_by_room[room_id]:
pdu_results[pdu.event_id] = await process_pdu(pdu)
event_id = pdu.event_id
pdu_results[event_id] = e.error_dict()
return
async def process_pdu(pdu: EventBase) -> JsonDict:
event_id = pdu.event_id
with pdu_process_time.time():
with nested_logging_context(event_id):
try:
await self._handle_received_pdu(origin, pdu)
return {}
except FederationError as e:
logger.warning("Error handling PDU %s: %s", event_id, e)
return {"error": str(e)}
except Exception as e:
f = failure.Failure()
logger.error(
"Failed to handle PDU %s",
event_id,
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
)
return {"error": str(e)}
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
with pdu_process_time.time():
with nested_logging_context(event_id):
try:
await self._handle_received_pdu(origin, pdu)
pdu_results[event_id] = {}
except FederationError as e:
logger.warning("Error handling PDU %s: %s", event_id, e)
pdu_results[event_id] = {"error": str(e)}
except Exception as e:
f = failure.Failure()
pdu_results[event_id] = {"error": str(e)}
logger.error(
"Failed to handle PDU %s",
event_id,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
await concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
)
if newest_pdu_ts and origin in self._federation_metrics_domains:
last_pdu_ts_metric.labels(server_name=origin).set(newest_pdu_ts / 1000)
newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)
return pdu_results
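The metric change above swaps an age gauge for a timestamp gauge; a hedged sketch of the timestamp variant (the values are illustrative):

from prometheus_client import Gauge

last_pdu_ts = Gauge(
    "synapse_federation_last_received_pdu_time",
    "The timestamp of the last PDU which was successfully received from the given domain",
    labelnames=("server_name",),
)

# Recording a timestamp rather than an age means the exporter does not need
# a clock reading at scrape time: age can be derived in the monitoring
# system as now() - value.
newest_pdu_ts = 1_613_654_400_000  # hypothetical origin_server_ts, in ms
last_pdu_ts.labels(server_name="example.org").set(newest_pdu_ts / 1000)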
@@ -461,22 +447,18 @@ class FederationServer(FederationBase):
async def _on_state_ids_request_compute(self, room_id, event_id):
state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = await self.store.get_auth_chain_ids(room_id, state_ids)
auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
async def _on_context_state_request_compute(
self, room_id: str, event_id: str
) -> Dict[str, list]:
if event_id:
pdus = await self.handler.get_state_for_pdu(
room_id, event_id
) # type: Iterable[EventBase]
pdus = await self.handler.get_state_for_pdu(room_id, event_id)
else:
pdus = (await self.state.get_current_state(room_id)).values()
auth_chain = await self.store.get_auth_chain(
room_id, [pdu.event_id for pdu in pdus]
)
auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
return {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
@@ -880,22 +862,13 @@ class FederationHandlerRegistry:
self.edu_handlers = (
{}
) # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
self.query_handlers = (
{}
) # type: Dict[str, Callable[[dict], Awaitable[JsonDict]]]
self.query_handlers = {} # type: Dict[str, Callable[[dict], Awaitable[None]]]
# Map from type to instance names that we should route EDU handling to.
# We randomly choose one instance from the list to route to for each new
# EDU received.
self._edu_type_to_instance = {} # type: Dict[str, List[str]]
# A rate limiter for incoming room key requests per origin.
self._room_key_request_rate_limiter = Ratelimiter(
clock=self.clock,
rate_hz=self.config.rc_key_requests.per_second,
burst_count=self.config.rc_key_requests.burst_count,
)
def register_edu_handler(
self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
):
@@ -916,7 +889,7 @@ class FederationHandlerRegistry:
self.edu_handlers[edu_type] = handler
def register_query_handler(
self, query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
self, query_type: str, handler: Callable[[dict], defer.Deferred]
):
"""Sets the handler callable that will be used to handle an incoming
federation query of the given type.
@@ -944,15 +917,7 @@ class FederationHandlerRegistry:
self._edu_type_to_instance[edu_type] = instance_names
async def on_edu(self, edu_type: str, origin: str, content: dict):
if not self.config.use_presence and edu_type == EduTypes.Presence:
return
# If the incoming room key requests from a particular origin are over
# the limit, drop them.
if (
edu_type == EduTypes.RoomKeyRequest
and not self._room_key_request_rate_limiter.can_do_action(origin)
):
if not self.config.use_presence and edu_type == "m.presence":
return
# Check if we have a handler on this instance
@@ -989,7 +954,7 @@ class FederationHandlerRegistry:
# Oh well, let's just log and move on.
logger.warning("No handler registered for EDU type %s", edu_type)
async def on_query(self, query_type: str, args: dict) -> JsonDict:
async def on_query(self, query_type: str, args: dict):
handler = self.query_handlers.get(query_type)
if handler:
return await handler(args)

View File

@@ -474,7 +474,7 @@ class FederationSender:
self._processing_pending_presence = False
def send_presence_to_destinations(
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
self, states: List[UserPresenceState], destinations: List[str]
) -> None:
"""Send the given presence states to the given destinations.
destinations (list[str])

View File

@@ -17,7 +17,6 @@ import datetime
import logging
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, cast
import attr
from prometheus_client import Counter
from synapse.api.errors import (
@@ -94,10 +93,6 @@ class PerDestinationQueue:
self._destination = destination
self.transmission_loop_running = False
# Flag to signal to any running transmission loop that there is new data
# queued up to be sent.
self._new_data_to_send = False
# True whilst we are sending events that the remote homeserver missed
# because it was unreachable. We start in this state so we can perform
# catch-up at startup.
@@ -113,7 +108,7 @@ class PerDestinationQueue:
# destination (we are the only updater so this is safe)
self._last_successful_stream_ordering = None # type: Optional[int]
# a queue of pending PDUs
# a list of pending PDUs
self._pending_pdus = [] # type: List[EventBase]
# XXX this is never actually used: see
@@ -213,10 +208,6 @@ class PerDestinationQueue:
transaction in the background.
"""
# Mark that we (may) have new things to send, so that any running
# transmission loop will recheck whether there is stuff to send.
self._new_data_to_send = True
if self.transmission_loop_running:
# XXX: a never-ending request can leave this flag stuck on, at which
# point pending_pdus just keeps growing.
@@ -259,41 +250,125 @@ class PerDestinationQueue:
pending_pdus = []
while True:
self._new_data_to_send = False
# We have to keep 2 free slots for presence and rr_edus
limit = MAX_EDUS_PER_TRANSACTION - 2
async with _TransactionQueueManager(self) as (
pending_pdus,
pending_edus,
):
if not pending_pdus and not pending_edus:
logger.debug("TX [%s] Nothing to send", self._destination)
device_update_edus, dev_list_id = await self._get_device_update_edus(
limit
)
# If we've gotten told about new things to send during
# checking for things to send, we try looking again.
# Otherwise new PDUs or EDUs might arrive in the meantime,
# but not get sent because we hold the
# `transmission_loop_running` flag.
if self._new_data_to_send:
continue
else:
return
limit -= len(device_update_edus)
if pending_pdus:
logger.debug(
"TX [%s] len(pending_pdus_by_dest[dest]) = %d",
self._destination,
len(pending_pdus),
(
to_device_edus,
device_stream_id,
) = await self._get_to_device_message_edus(limit)
pending_edus = device_update_edus + to_device_edus
# BEGIN CRITICAL SECTION
#
# In order to avoid a race condition, we need to make sure that
# the following code (from popping the queues up to the point
# where we decide if we actually have any pending messages) is
# atomic - otherwise new PDUs or EDUs might arrive in the
# meantime, but not get sent because we hold the
# transmission_loop_running flag.
pending_pdus = self._pending_pdus
# We can only include at most 50 PDUs per transaction
pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]
pending_edus.extend(self._get_rr_edus(force_flush=False))
pending_presence = self._pending_presence
self._pending_presence = {}
if pending_presence:
pending_edus.append(
Edu(
origin=self._server_name,
destination=self._destination,
edu_type="m.presence",
content={
"push": [
format_user_presence_state(
presence, self._clock.time_msec()
)
for presence in pending_presence.values()
]
},
)
await self._transaction_manager.send_new_transaction(
self._destination, pending_pdus, pending_edus
)
pending_edus.extend(
self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
)
while (
len(pending_edus) < MAX_EDUS_PER_TRANSACTION
and self._pending_edus_keyed
):
_, val = self._pending_edus_keyed.popitem()
pending_edus.append(val)
if pending_pdus:
logger.debug(
"TX [%s] len(pending_pdus_by_dest[dest]) = %d",
self._destination,
len(pending_pdus),
)
if not pending_pdus and not pending_edus:
logger.debug("TX [%s] Nothing to send", self._destination)
self._last_device_stream_id = device_stream_id
return
# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
pending_edus.extend(self._get_rr_edus(force_flush=True))
# END CRITICAL SECTION
success = await self._transaction_manager.send_new_transaction(
self._destination, pending_pdus, pending_edus
)
if success:
sent_transactions_counter.inc()
sent_edus_counter.inc(len(pending_edus))
for edu in pending_edus:
sent_edus_by_type.labels(edu.edu_type).inc()
# Remove the acknowledged device messages from the database
# Only bother if we actually sent some device messages
if to_device_edus:
await self._store.delete_device_msgs_for_remote(
self._destination, device_stream_id
)
# also mark the device updates as sent
if device_update_edus:
logger.info(
"Marking as sent %r %r", self._destination, dev_list_id
)
await self._store.mark_as_sent_devices_by_remote(
self._destination, dev_list_id
)
self._last_device_stream_id = device_stream_id
self._last_device_list_stream_id = dev_list_id
if pending_pdus:
# we sent some PDUs and it was successful, so update our
# last_successful_stream_ordering in the destinations table.
final_pdu = pending_pdus[-1]
last_successful_stream_ordering = (
final_pdu.internal_metadata.stream_ordering
)
assert last_successful_stream_ordering
await self._store.set_destination_last_successful_stream_ordering(
self._destination, last_successful_stream_ordering
)
else:
break
except NotRetryingDestination as e:
logger.debug(
"TX [%s] not ready for retry yet (next retry at %s) - "
@@ -326,7 +401,7 @@ class PerDestinationQueue:
self._pending_presence = {}
self._pending_rrs = {}
self._start_catching_up()
self._start_catching_up()
except FederationDeniedError as e:
logger.info(e)
except HttpResponseException as e:
@@ -337,6 +412,7 @@ class PerDestinationQueue:
e,
)
self._start_catching_up()
except RequestSendFailed as e:
logger.warning(
"TX [%s] Failed to send transaction: %s", self._destination, e
@@ -346,12 +422,16 @@ class PerDestinationQueue:
logger.info(
"Failed to send event %s to %s", p.event_id, self._destination
)
self._start_catching_up()
except Exception:
logger.exception("TX [%s] Failed to send transaction", self._destination)
for p in pending_pdus:
logger.info(
"Failed to send event %s to %s", p.event_id, self._destination
)
self._start_catching_up()
finally:
# We want to be *very* sure we clear this after we stop processing
self.transmission_loop_running = False
@@ -419,10 +499,13 @@ class PerDestinationQueue:
rooms = [p.room_id for p in catchup_pdus]
logger.info("Catching up rooms to %s: %r", self._destination, rooms)
await self._transaction_manager.send_new_transaction(
success = await self._transaction_manager.send_new_transaction(
self._destination, catchup_pdus, []
)
if not success:
return
sent_transactions_counter.inc()
final_pdu = catchup_pdus[-1]
self._last_successful_stream_ordering = cast(
@@ -501,135 +584,3 @@ class PerDestinationQueue:
"""
self._catching_up = True
self._pending_pdus = []
@attr.s(slots=True)
class _TransactionQueueManager:
"""A helper async context manager for pulling stuff off the queues and
tracking what was last successfully sent, etc.
"""
queue = attr.ib(type=PerDestinationQueue)
_device_stream_id = attr.ib(type=Optional[int], default=None)
_device_list_id = attr.ib(type=Optional[int], default=None)
_last_stream_ordering = attr.ib(type=Optional[int], default=None)
_pdus = attr.ib(type=List[EventBase], factory=list)
async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
# First we calculate the EDUs we want to send, if any.
# We start by fetching device related EDUs, i.e device updates and to
# device messages. We have to keep 2 free slots for presence and rr_edus.
limit = MAX_EDUS_PER_TRANSACTION - 2
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
limit
)
if device_update_edus:
self._device_list_id = dev_list_id
else:
self.queue._last_device_list_stream_id = dev_list_id
limit -= len(device_update_edus)
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(limit)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
pending_edus = device_update_edus + to_device_edus
# Now add the read receipt EDU.
pending_edus.extend(self.queue._get_rr_edus(force_flush=False))
# And presence EDU.
if self.queue._pending_presence:
pending_edus.append(
Edu(
origin=self.queue._server_name,
destination=self.queue._destination,
edu_type="m.presence",
content={
"push": [
format_user_presence_state(
presence, self.queue._clock.time_msec()
)
for presence in self.queue._pending_presence.values()
]
},
)
)
self.queue._pending_presence = {}
# Finally add any other types of EDUs if there is room.
pending_edus.extend(
self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
)
while (
len(pending_edus) < MAX_EDUS_PER_TRANSACTION
and self.queue._pending_edus_keyed
):
_, val = self.queue._pending_edus_keyed.popitem()
pending_edus.append(val)
# Now we look for any PDUs to send, by getting up to 50 PDUs from the
# queue
self._pdus = self.queue._pending_pdus[:50]
if not self._pdus and not pending_edus:
return [], []
# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
pending_edus.extend(self.queue._get_rr_edus(force_flush=True))
if self._pdus:
self._last_stream_ordering = self._pdus[
-1
].internal_metadata.stream_ordering
assert self._last_stream_ordering
return self._pdus, pending_edus
async def __aexit__(self, exc_type, exc, tb):
if exc_type is not None:
# Failed to send transaction, so we bail out.
return
# Successfully sent transactions, so we remove pending PDUs from the queue
if self._pdus:
self.queue._pending_pdus = self.queue._pending_pdus[len(self._pdus) :]
# Succeeded to send the transaction so we record where we have sent up
# to in the various streams
if self._device_stream_id:
await self.queue._store.delete_device_msgs_for_remote(
self.queue._destination, self._device_stream_id
)
self.queue._last_device_stream_id = self._device_stream_id
# also mark the device updates as sent
if self._device_list_id:
logger.info(
"Marking as sent %r %r", self.queue._destination, self._device_list_id
)
await self.queue._store.mark_as_sent_devices_by_remote(
self.queue._destination, self._device_list_id
)
self.queue._last_device_list_stream_id = self._device_list_id
if self._last_stream_ordering:
# we sent some PDUs and it was successful, so update our
# last_successful_stream_ordering in the destinations table.
await self.queue._store.set_destination_last_successful_stream_ordering(
self.queue._destination, self._last_stream_ordering
)
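A hypothetical distillation of the wake-up pattern used by the newer transmission loop above: clear a dirty flag before scanning for work, and re-scan if the flag was set again while concluding there was nothing to send:

import asyncio
from typing import List

class DestinationQueue:
    def __init__(self) -> None:
        self.new_data_to_send = False
        self.pending: List[str] = []

    def enqueue(self, item: str) -> None:
        self.pending.append(item)
        self.new_data_to_send = True

    async def transmission_loop(self) -> None:
        while True:
            self.new_data_to_send = False
            batch, self.pending = self.pending[:50], self.pending[50:]
            if not batch:
                if self.new_data_to_send:
                    continue  # something arrived while we were checking
                return
            await asyncio.sleep(0)  # placeholder for sending a transaction

q = DestinationQueue()
q.enqueue("pdu-1")
asyncio.run(q.transmission_loop())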

View File

@@ -36,9 +36,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
last_pdu_ts_metric = Gauge(
"synapse_federation_last_sent_pdu_time",
"The timestamp of the last PDU which was successfully sent to the given domain",
last_pdu_age_metric = Gauge(
"synapse_federation_last_sent_pdu_age",
"The age (in seconds) of the last PDU successfully sent to the given domain",
labelnames=("server_name",),
)
@@ -69,12 +69,15 @@ class TransactionManager:
destination: str,
pdus: List[EventBase],
edus: List[Edu],
) -> None:
) -> bool:
"""
Args:
destination: The destination to send to (e.g. 'example.org')
pdus: In-order list of PDUs to send
edus: List of EDUs to send
Returns:
True iff the transaction was successful
"""
# Make a transaction-sending opentracing span. This span follows on from
@@ -93,6 +96,8 @@ class TransactionManager:
edu.strip_context()
with start_active_span_follows_from("send_transaction", span_contexts):
success = True
logger.debug("TX [%s] _attempt_new_transaction", destination)
txn_id = str(self._next_txn_id)
@@ -147,29 +152,45 @@ class TransactionManager:
response = await self._transport_layer.send_transaction(
transaction, json_data_cb
)
code = 200
except HttpResponseException as e:
code = e.code
response = e.response
set_tag(tags.ERROR, True)
if e.code in (401, 404, 429) or 500 <= e.code:
logger.info(
"TX [%s] {%s} got %d response", destination, txn_id, code
)
raise e
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
raise
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
logger.info("TX [%s] {%s} got 200 response", destination, txn_id)
for e_id, r in response.get("pdus", {}).items():
if "error" in r:
if code == 200:
for e_id, r in response.get("pdus", {}).items():
if "error" in r:
logger.warning(
"TX [%s] {%s} Remote returned error for %s: %s",
destination,
txn_id,
e_id,
r,
)
else:
for p in pdus:
logger.warning(
"TX [%s] {%s} Remote returned error for %s: %s",
"TX [%s] {%s} Failed to send event %s",
destination,
txn_id,
e_id,
r,
p.event_id,
)
success = False
if pdus and destination in self._federation_metrics_domains:
if success and pdus and destination in self._federation_metrics_domains:
last_pdu = pdus[-1]
last_pdu_ts_metric.labels(server_name=destination).set(
last_pdu.origin_server_ts / 1000
last_pdu_age = self.clock.time_msec() - last_pdu.origin_server_ts
last_pdu_age_metric.labels(server_name=destination).set(
last_pdu_age / 1000
)
set_tag(tags.ERROR, not success)
return success
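A hedged distillation of the status handling above: 401, 404, 429 and any 5xx response abort the transaction by re-raising, while other HTTP errors are only logged:

def should_reraise(code: int) -> bool:
    # Mirrors the condition in the except branch above.
    return code in (401, 404, 429) or 500 <= code

assert should_reraise(429) and should_reraise(502)
assert not should_reraise(403)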

View File

@@ -21,7 +21,6 @@ import re
from typing import Optional, Tuple, Type
import synapse
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
@@ -484,9 +483,10 @@ class FederationQueryServlet(BaseFederationServlet):
# This is when we receive a server-server Query
async def on_GET(self, origin, content, query, query_type):
args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
args["origin"] = origin
return await self.handler.on_query_request(query_type, args)
return await self.handler.on_query_request(
query_type,
{k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()},
)
class FederationMakeJoinServlet(BaseFederationServlet):
@@ -1118,17 +1118,7 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(
400, "category_id cannot be empty string", Codes.INVALID_PARAM
)
if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
raise SynapseError(
400,
"category_id may not be longer than %s characters"
% (MAX_GROUP_CATEGORYID_LENGTH,),
Codes.INVALID_PARAM,
)
raise SynapseError(400, "category_id cannot be empty string")
resp = await self.handler.update_group_summary_room(
group_id,
@@ -1194,14 +1184,6 @@ class FederationGroupsCategoryServlet(BaseFederationServlet):
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
raise SynapseError(
400,
"category_id may not be longer than %s characters"
% (MAX_GROUP_CATEGORYID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.upsert_group_category(
group_id, requester_user_id, category_id, content
)
@@ -1258,17 +1240,7 @@ class FederationGroupsRoleServlet(BaseFederationServlet):
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(
400, "role_id cannot be empty string", Codes.INVALID_PARAM
)
if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
raise SynapseError(
400,
"role_id may not be longer than %s characters"
% (MAX_GROUP_ROLEID_LENGTH,),
Codes.INVALID_PARAM,
)
raise SynapseError(400, "role_id cannot be empty string")
resp = await self.handler.update_group_role(
group_id, requester_user_id, role_id, content
@@ -1313,14 +1285,6 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
raise SynapseError(
400,
"role_id may not be longer than %s characters"
% (MAX_GROUP_ROLEID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_summary_user(
group_id,
requester_user_id,

View File

@@ -37,16 +37,13 @@ An attestation is a signed blob of json that looks like:
import logging
import random
from typing import TYPE_CHECKING, Optional, Tuple
from typing import Tuple
from signedjson.sign import sign_json
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict, get_domain_from_id
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
from synapse.types import get_domain_from_id
logger = logging.getLogger(__name__)
@@ -66,19 +63,15 @@ UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000
class GroupAttestationSigning:
"""Creates and verifies group attestations."""
def __init__(self, hs: "HomeServer"):
def __init__(self, hs):
self.keyring = hs.get_keyring()
self.clock = hs.get_clock()
self.server_name = hs.hostname
self.signing_key = hs.signing_key
async def verify_attestation(
self,
attestation: JsonDict,
group_id: str,
user_id: str,
server_name: Optional[str] = None,
) -> None:
self, attestation, group_id, user_id, server_name=None
):
"""Verifies that the given attestation matches the given parameters.
An optional server_name can be supplied to explicitly set which server's
@@ -107,18 +100,16 @@ class GroupAttestationSigning:
if valid_until_ms < now:
raise SynapseError(400, "Attestation expired")
assert server_name is not None
await self.keyring.verify_json_for_server(
server_name, attestation, now, "Group attestation"
)
def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
def create_attestation(self, group_id, user_id):
"""Create an attestation for the group_id and user_id with default
validity length.
"""
validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform(
*DEFAULT_ATTESTATION_JITTER
)
validity_period = DEFAULT_ATTESTATION_LENGTH_MS
validity_period *= random.uniform(*DEFAULT_ATTESTATION_JITTER)
valid_until_ms = int(self.clock.time_msec() + validity_period)
return sign_json(
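A hypothetical end-to-end sketch of building such an attestation with signedjson (the constants are assumed to match the defaults named earlier in this file, and the key, server name and IDs are made up):

import random

from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000  # assumed default
DEFAULT_ATTESTATION_JITTER = (0.9, 1.3)                  # assumed default

signing_key = generate_signing_key("ed25519-key-v1")
now_ms = 1_613_654_400_000  # hypothetical clock reading

validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform(
    *DEFAULT_ATTESTATION_JITTER
)
attestation = sign_json(
    {
        "user_id": "@alice:example.org",
        "group_id": "+club:example.org",
        "valid_until_ms": int(now_ms + validity_period),
    },
    "example.org",
    signing_key,
)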
@@ -135,7 +126,7 @@ class GroupAttestationSigning:
class GroupAttestionRenewer:
"""Responsible for sending and receiving attestation updates."""
def __init__(self, hs: "HomeServer"):
def __init__(self, hs):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.assestations = hs.get_groups_attestation_signing()
@@ -148,9 +139,7 @@ class GroupAttestionRenewer:
self._start_renew_attestations, 30 * 60 * 1000
)
async def on_renew_attestation(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
async def on_renew_attestation(self, group_id, user_id, content):
"""When a remote updates an attestation"""
attestation = content["attestation"]
@@ -165,10 +154,10 @@ class GroupAttestionRenewer:
return {}
def _start_renew_attestations(self) -> None:
def _start_renew_attestations(self):
return run_as_background_process("renew_attestations", self._renew_attestations)
async def _renew_attestations(self) -> None:
async def _renew_attestations(self):
"""Called periodically to check if we need to update any of our attestations"""
now = self.clock.time_msec()
@@ -177,7 +166,7 @@ class GroupAttestionRenewer:
now + UPDATE_ATTESTATION_TIME_MS
)
async def _renew_attestation(group_user: Tuple[str, str]) -> None:
async def _renew_attestation(group_user: Tuple[str, str]):
group_id, user_id = group_user
try:
if not self.is_mine_id(group_id):

View File

@@ -16,17 +16,12 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Optional
from synapse.api.errors import Codes, SynapseError
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.types import GroupID, JsonDict, RoomID, UserID, get_domain_from_id
from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
logger = logging.getLogger(__name__)
@@ -44,7 +39,7 @@ MAX_LONG_DESC_LEN = 10000
class GroupsServerWorkerHandler:
def __init__(self, hs: "HomeServer"):
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.room_list_handler = hs.get_room_list_handler()
@@ -59,21 +54,16 @@ class GroupsServerWorkerHandler:
self.profile_handler = hs.get_profile_handler()
async def check_group_is_ours(
self,
group_id: str,
requester_user_id: str,
and_exists: bool = False,
and_is_admin: Optional[str] = None,
) -> Optional[dict]:
self, group_id, requester_user_id, and_exists=False, and_is_admin=None
):
"""Check that the group is ours, and optionally if it exists.
If group does exist then return group.
Args:
group_id: The group ID to check.
requester_user_id: The user ID of the requester.
and_exists: whether to also check if group exists
and_is_admin: whether to also check if given str is a user_id
group_id (str)
and_exists (bool): whether to also check if group exists
and_is_admin (str): whether to also check if given str is a user_id
that is an admin
"""
if not self.is_mine_id(group_id):
@@ -96,9 +86,7 @@ class GroupsServerWorkerHandler:
return group
async def get_group_summary(
self, group_id: str, requester_user_id: str
) -> JsonDict:
async def get_group_summary(self, group_id, requester_user_id):
"""Get the summary for a group as seen by requester_user_id.
The group summary consists of the profile of the room, and a curated
@@ -131,8 +119,6 @@ class GroupsServerWorkerHandler:
entry = await self.room_list_handler.generate_room_entry(
room_id, len(joined_users), with_alias=False, allow_private=True
)
if entry is None:
continue
entry = dict(entry) # so we don't change what's cached
entry.pop("room_id", None)
@@ -140,22 +126,22 @@ class GroupsServerWorkerHandler:
rooms.sort(key=lambda e: e.get("order", 0))
for user in users:
user_id = user["user_id"]
for entry in users:
user_id = entry["user_id"]
if not self.is_mine_id(requester_user_id):
attestation = await self.store.get_remote_attestation(group_id, user_id)
if not attestation:
continue
user["attestation"] = attestation
entry["attestation"] = attestation
else:
user["attestation"] = self.attestations.create_attestation(
entry["attestation"] = self.attestations.create_attestation(
group_id, user_id
)
user_profile = await self.profile_handler.get_profile_from_cache(user_id)
user.update(user_profile)
entry.update(user_profile)
users.sort(key=lambda e: e.get("order", 0))
@@ -178,43 +164,40 @@ class GroupsServerWorkerHandler:
"user": membership_info,
}
async def get_group_categories(
self, group_id: str, requester_user_id: str
) -> JsonDict:
async def get_group_categories(self, group_id, requester_user_id):
"""Get all categories in a group (as seen by user)"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
categories = await self.store.get_group_categories(group_id=group_id)
return {"categories": categories}
async def get_group_category(
self, group_id: str, requester_user_id: str, category_id: str
) -> JsonDict:
async def get_group_category(self, group_id, requester_user_id, category_id):
"""Get a specific category in a group (as seen by user)"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
return await self.store.get_group_category(
res = await self.store.get_group_category(
group_id=group_id, category_id=category_id
)
async def get_group_roles(self, group_id: str, requester_user_id: str) -> JsonDict:
logger.info("group %s", res)
return res
async def get_group_roles(self, group_id, requester_user_id):
"""Get all roles in a group (as seen by user)"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
roles = await self.store.get_group_roles(group_id=group_id)
return {"roles": roles}
async def get_group_role(
self, group_id: str, requester_user_id: str, role_id: str
) -> JsonDict:
async def get_group_role(self, group_id, requester_user_id, role_id):
"""Get a specific role in a group (as seen by user)"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
return await self.store.get_group_role(group_id=group_id, role_id=role_id)
res = await self.store.get_group_role(group_id=group_id, role_id=role_id)
return res
async def get_group_profile(
self, group_id: str, requester_user_id: str
) -> JsonDict:
async def get_group_profile(self, group_id, requester_user_id):
"""Get the group profile as seen by requester_user_id"""
await self.check_group_is_ours(group_id, requester_user_id)
@@ -236,9 +219,7 @@ class GroupsServerWorkerHandler:
else:
raise SynapseError(404, "Unknown group")
async def get_users_in_group(
self, group_id: str, requester_user_id: str
) -> JsonDict:
async def get_users_in_group(self, group_id, requester_user_id):
"""Get the users in group as seen by requester_user_id.
The ordering is arbitrary at the moment
@@ -287,9 +268,7 @@ class GroupsServerWorkerHandler:
return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
async def get_invited_users_in_group(
self, group_id: str, requester_user_id: str
) -> JsonDict:
async def get_invited_users_in_group(self, group_id, requester_user_id):
"""Get the users that have been invited to a group as seen by requester_user_id.
The ordering is arbitrary at the moment
@@ -319,9 +298,7 @@ class GroupsServerWorkerHandler:
return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
async def get_rooms_in_group(
self, group_id: str, requester_user_id: str
) -> JsonDict:
async def get_rooms_in_group(self, group_id, requester_user_id):
"""Get the rooms in group as seen by requester_user_id
This returns rooms in order of decreasing number of joined users
@@ -359,20 +336,15 @@ class GroupsServerWorkerHandler:
class GroupsServerHandler(GroupsServerWorkerHandler):
def __init__(self, hs: "HomeServer"):
def __init__(self, hs):
super().__init__(hs)
# Ensure attestations get renewed
hs.get_groups_attestation_renewer()
async def update_group_summary_room(
self,
group_id: str,
requester_user_id: str,
room_id: str,
category_id: str,
content: JsonDict,
) -> JsonDict:
self, group_id, requester_user_id, room_id, category_id, content
):
"""Add/update a room to the group summary"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -395,8 +367,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def delete_group_summary_room(
self, group_id: str, requester_user_id: str, room_id: str, category_id: str
) -> JsonDict:
self, group_id, requester_user_id, room_id, category_id
):
"""Remove a room from the summary"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -408,9 +380,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def set_group_join_policy(
self, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
async def set_group_join_policy(self, group_id, requester_user_id, content):
"""Sets the group join policy.
Currently supported policies are:
@@ -430,8 +400,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def update_group_category(
self, group_id: str, requester_user_id: str, category_id: str, content: JsonDict
) -> JsonDict:
self, group_id, requester_user_id, category_id, content
):
"""Add/Update a group category"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -449,9 +419,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def delete_group_category(
self, group_id: str, requester_user_id: str, category_id: str
) -> JsonDict:
async def delete_group_category(self, group_id, requester_user_id, category_id):
"""Delete a group category"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -463,9 +431,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def update_group_role(
self, group_id: str, requester_user_id: str, role_id: str, content: JsonDict
) -> JsonDict:
async def update_group_role(self, group_id, requester_user_id, role_id, content):
"""Add/update a role in a group"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -481,9 +447,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def delete_group_role(
self, group_id: str, requester_user_id: str, role_id: str
) -> JsonDict:
async def delete_group_role(self, group_id, requester_user_id, role_id):
"""Remove role from group"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -494,13 +458,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def update_group_summary_user(
self,
group_id: str,
requester_user_id: str,
user_id: str,
role_id: str,
content: JsonDict,
) -> JsonDict:
self, group_id, requester_user_id, user_id, role_id, content
):
"""Add/update a users entry in the group summary"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -521,8 +480,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def delete_group_summary_user(
self, group_id: str, requester_user_id: str, user_id: str, role_id: str
) -> JsonDict:
self, group_id, requester_user_id, user_id, role_id
):
"""Remove a user from the group summary"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -534,9 +493,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def update_group_profile(
self, group_id: str, requester_user_id: str, content: JsonDict
) -> None:
async def update_group_profile(self, group_id, requester_user_id, content):
"""Update the group profile"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -567,9 +524,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
await self.store.update_group_profile(group_id, profile)
async def add_room_to_group(
self, group_id: str, requester_user_id: str, room_id: str, content: JsonDict
) -> JsonDict:
async def add_room_to_group(self, group_id, requester_user_id, room_id, content):
"""Add room to group"""
RoomID.from_string(room_id) # Ensure valid room id
@@ -584,13 +539,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def update_room_in_group(
self,
group_id: str,
requester_user_id: str,
room_id: str,
config_key: str,
content: JsonDict,
) -> JsonDict:
self, group_id, requester_user_id, room_id, config_key, content
):
"""Update room in group"""
RoomID.from_string(room_id) # Ensure valid room id
@@ -609,9 +559,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def remove_room_from_group(
self, group_id: str, requester_user_id: str, room_id: str
) -> JsonDict:
async def remove_room_from_group(self, group_id, requester_user_id, room_id):
"""Remove room from group"""
await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
@@ -621,16 +569,12 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def invite_to_group(
self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
async def invite_to_group(self, group_id, user_id, requester_user_id, content):
"""Invite user to group"""
group = await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
)
if not group:
raise SynapseError(400, "Group does not exist", errcode=Codes.BAD_STATE)
# TODO: Check if user knocked
@@ -653,9 +597,6 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
if self.hs.is_mine_id(user_id):
groups_local = self.hs.get_groups_local_handler()
assert isinstance(
groups_local, GroupsLocalHandler
), "Workers cannot invites users to groups."
res = await groups_local.on_invite(group_id, user_id, content)
local_attestation = None
else:
@@ -691,7 +632,6 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
local_attestation=local_attestation,
remote_attestation=remote_attestation,
)
return {"state": "join"}
elif res["state"] == "invite":
await self.store.add_group_invite(group_id, user_id)
return {"state": "invite"}
@@ -700,17 +640,13 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
else:
raise SynapseError(502, "Unknown state returned by HS")
async def _add_user(
self, group_id: str, user_id: str, content: JsonDict
) -> Optional[JsonDict]:
async def _add_user(self, group_id, user_id, content):
"""Add a user to a group based on a content dict.
See accept_invite, join_group.
"""
if not self.hs.is_mine_id(user_id):
local_attestation = self.attestations.create_attestation(
group_id, user_id
) # type: Optional[JsonDict]
local_attestation = self.attestations.create_attestation(group_id, user_id)
remote_attestation = content["attestation"]
@@ -734,9 +670,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return local_attestation
async def accept_invite(
self, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
async def accept_invite(self, group_id, requester_user_id, content):
"""User tries to accept an invite to the group.
This is different from them asking to join, and so should error if no
@@ -755,9 +689,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {"state": "join", "attestation": local_attestation}
async def join_group(
self, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
async def join_group(self, group_id, requester_user_id, content):
"""User tries to join the group.
This will error if the group requires an invite/knock to join
@@ -766,8 +698,6 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
group_info = await self.check_group_is_ours(
group_id, requester_user_id, and_exists=True
)
if not group_info:
raise SynapseError(404, "Group does not exist", errcode=Codes.NOT_FOUND)
if group_info["join_policy"] != "open":
raise SynapseError(403, "Group is not publicly joinable")
@@ -775,9 +705,25 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {"state": "join", "attestation": local_attestation}
async def knock(self, group_id, requester_user_id, content):
"""A user requests becoming a member of the group"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
raise NotImplementedError()
async def accept_knock(self, group_id, requester_user_id, content):
"""Accept a users knock to the room.
Errors if the user hasn't knocked, rather than inviting them.
"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
raise NotImplementedError()
async def remove_user_from_group(
self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
self, group_id, user_id, requester_user_id, content
):
"""Remove a user from the group; either a user is leaving or an admin
kicked them.
"""
@@ -799,9 +745,6 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
if is_kick:
if self.hs.is_mine_id(user_id):
groups_local = self.hs.get_groups_local_handler()
assert isinstance(
groups_local, GroupsLocalHandler
), "Workers cannot remove users from groups."
await groups_local.user_removed_from_group(group_id, user_id, {})
else:
await self.transport_client.remove_user_from_group_notification(
@@ -818,15 +761,14 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {}
async def create_group(
self, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
async def create_group(self, group_id, requester_user_id, content):
group = await self.check_group_is_ours(group_id, requester_user_id)
logger.info("Attempting to create group with ID: %r", group_id)
# parsing the id into a GroupID validates it.
group_id_obj = GroupID.from_string(group_id)
group = await self.check_group_is_ours(group_id, requester_user_id)
if group:
raise SynapseError(400, "Group already exists")
@@ -871,7 +813,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
local_attestation = self.attestations.create_attestation(
group_id, requester_user_id
) # type: Optional[JsonDict]
)
else:
local_attestation = None
remote_attestation = None
@@ -894,14 +836,15 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
return {"group_id": group_id}
async def delete_group(self, group_id: str, requester_user_id: str) -> None:
async def delete_group(self, group_id, requester_user_id):
"""Deletes a group, kicking out all current members.
Only group admins or server admins can call this request
Args:
group_id: The group ID to delete.
requester_user_id: The user requesting to delete the group.
group_id (str)
request_user_id (str)
"""
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
@@ -924,9 +867,6 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
async def _kick_user_from_group(user_id):
if self.hs.is_mine_id(user_id):
groups_local = self.hs.get_groups_local_handler()
assert isinstance(
groups_local, GroupsLocalHandler
), "Workers cannot kick users from groups."
await groups_local.user_removed_from_group(group_id, user_id, {})
else:
await self.transport_client.remove_user_from_group_notification(
@@ -958,7 +898,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
await self.store.delete_group(group_id)
def _parse_join_policy_from_contents(content: JsonDict) -> Optional[str]:
def _parse_join_policy_from_contents(content):
"""Given a content for a request, return the specified join policy or None"""
join_policy_dict = content.get("m.join_policy")
@@ -968,7 +908,7 @@ def _parse_join_policy_from_contents(content: JsonDict) -> Optional[str]:
return None
def _parse_join_policy_dict(join_policy_dict: JsonDict) -> str:
def _parse_join_policy_dict(join_policy_dict):
"""Given a dict for the "m.join_policy" config return the join policy specified"""
join_policy_type = join_policy_dict.get("type")
if not join_policy_type:
@@ -979,7 +919,7 @@ def _parse_join_policy_dict(join_policy_dict: JsonDict) -> str:
return join_policy_type
def _parse_visibility_from_contents(content: JsonDict) -> bool:
def _parse_visibility_from_contents(content):
"""Given a content for a request parse out whether the entity should be
public or not
"""
@@ -993,7 +933,7 @@ def _parse_visibility_from_contents(content: JsonDict) -> bool:
return is_public
def _parse_visibility_dict(visibility: JsonDict) -> bool:
def _parse_visibility_dict(visibility):
"""Given a dict for the "m.visibility" config return if the entity should
be public or not
"""

View File

@@ -73,9 +73,7 @@ class AcmeHandler:
"Listening for ACME requests on %s:%i", host, self.hs.config.acme_port
)
try:
self.reactor.listenTCP(
self.hs.config.acme_port, srv, backlog=50, interface=host
)
self.reactor.listenTCP(self.hs.config.acme_port, srv, interface=host)
except twisted.internet.error.CannotListenError as e:
check_bind_error(e, host, bind_addresses)

View File

@@ -36,7 +36,7 @@ import attr
import bcrypt
import pymacaroons
from twisted.web.server import Request
from twisted.web.http import Request
from synapse.api.constants import LoginType
from synapse.api.errors import (
@@ -65,7 +65,6 @@ from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.threepids import canonicalise_email
@@ -171,16 +170,6 @@ class SsoLoginExtraAttributes:
extra_attributes = attr.ib(type=JsonDict)
@attr.s(slots=True, frozen=True)
class LoginTokenAttributes:
"""Data we store in a short-term login token"""
user_id = attr.ib(type=str)
# the SSO Identity Provider that the user authenticated with, to get this token
auth_provider_id = attr.ib(type=str)
class AuthHandler(BaseHandler):
SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000
@@ -337,8 +326,7 @@ class AuthHandler(BaseHandler):
user is too high to proceed
"""
if not requester.access_token_id:
raise ValueError("Cannot validate a user without an access token")
if self._ui_auth_session_timeout:
last_validated = await self.store.get_access_token_last_validated(
requester.access_token_id
@@ -493,7 +481,7 @@ class AuthHandler(BaseHandler):
sid = authdict["session"]
# Convert the URI and method to strings.
uri = request.uri.decode("utf-8") # type: ignore
uri = request.uri.decode("utf-8")
method = request.method.decode("utf-8")
# If there's no session ID, create a new session.
@@ -1176,16 +1164,18 @@ class AuthHandler(BaseHandler):
return None
return user_id
async def validate_short_term_login_token(
self, login_token: str
) -> LoginTokenAttributes:
async def validate_short_term_login_token_and_get_user_id(self, login_token: str):
auth_api = self.hs.get_auth()
user_id = None
try:
res = self.macaroon_gen.verify_short_term_login_token(login_token)
macaroon = pymacaroons.Macaroon.deserialize(login_token)
user_id = auth_api.get_user_id_from_macaroon(macaroon)
auth_api.validate_macaroon(macaroon, "login", user_id)
except Exception:
raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
await self.auth.check_auth_blocking(res.user_id)
return res
await self.auth.check_auth_blocking(user_id)
return user_id
async def delete_access_token(self, access_token: str):
"""Invalidate a single access token
@@ -1214,7 +1204,7 @@ class AuthHandler(BaseHandler):
async def delete_access_tokens_for_user(
self,
user_id: str,
except_token_id: Optional[int] = None,
except_token_id: Optional[str] = None,
device_id: Optional[str] = None,
):
"""Invalidate access tokens belonging to a user
@@ -1407,7 +1397,6 @@ class AuthHandler(BaseHandler):
async def complete_sso_login(
self,
registered_user_id: str,
auth_provider_id: str,
request: Request,
client_redirect_url: str,
extra_attributes: Optional[JsonDict] = None,
@@ -1417,9 +1406,6 @@ class AuthHandler(BaseHandler):
Args:
registered_user_id: The registered user ID to complete SSO login for.
auth_provider_id: The id of the SSO Identity provider that was used for
login. This will be stored in the login token for future tracking in
prometheus metrics.
request: The request to complete.
client_redirect_url: The URL to which to redirect the user at the end of the
process.
@@ -1441,7 +1427,6 @@ class AuthHandler(BaseHandler):
self._complete_sso_login(
registered_user_id,
auth_provider_id,
request,
client_redirect_url,
extra_attributes,
@@ -1452,7 +1437,6 @@ class AuthHandler(BaseHandler):
def _complete_sso_login(
self,
registered_user_id: str,
auth_provider_id: str,
request: Request,
client_redirect_url: str,
extra_attributes: Optional[JsonDict] = None,
@@ -1479,7 +1463,7 @@ class AuthHandler(BaseHandler):
# Create a login token
login_token = self.macaroon_gen.generate_short_term_login_token(
registered_user_id, auth_provider_id=auth_provider_id
registered_user_id
)
# Append the login token to the original redirect URL (i.e. with its query
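The comment above (cut off at the hunk boundary) describes appending the freshly minted login token to the client's redirect URL. A small standard-library sketch of that step; "loginToken" is the query-parameter name used by the Matrix client-server m.login.token flow:

    from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

    def append_login_token(redirect_url: str, login_token: str) -> str:
        # Preserve whatever query string the client supplied, then add ours.
        parts = urlparse(redirect_url)
        query = parse_qsl(parts.query, keep_blank_values=True)
        query.append(("loginToken", login_token))
        return urlunparse(parts._replace(query=urlencode(query)))

    print(append_login_token("https://client.example/?flow=sso", "MDax..."))
    # https://client.example/?flow=sso&loginToken=MDax...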
@@ -1585,48 +1569,15 @@ class MacaroonGenerator:
return macaroon.serialize()
def generate_short_term_login_token(
self,
user_id: str,
auth_provider_id: str,
duration_in_ms: int = (2 * 60 * 1000),
self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
) -> str:
macaroon = self._generate_base_macaroon(user_id)
macaroon.add_first_party_caveat("type = login")
now = self.hs.get_clock().time_msec()
expiry = now + duration_in_ms
macaroon.add_first_party_caveat("time < %d" % (expiry,))
macaroon.add_first_party_caveat("auth_provider_id = %s" % (auth_provider_id,))
return macaroon.serialize()
def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes:
"""Verify a short-term-login macaroon
Checks that the given token is a valid, unexpired short-term-login token
minted by this server.
Args:
token: the login token to verify
Returns:
the user_id that this token is valid for
Raises:
MacaroonVerificationFailedException if the verification failed
"""
macaroon = pymacaroons.Macaroon.deserialize(token)
user_id = get_value_from_macaroon(macaroon, "user_id")
auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id")
v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = login")
v.satisfy_general(lambda c: c.startswith("user_id = "))
v.satisfy_general(lambda c: c.startswith("auth_provider_id = "))
satisfy_expiry(v, self.hs.get_clock().time_msec)
v.verify(macaroon, self.hs.config.key.macaroon_secret_key)
return LoginTokenAttributes(user_id=user_id, auth_provider_id=auth_provider_id)
def generate_delete_pusher_token(self, user_id: str) -> str:
macaroon = self._generate_base_macaroon(user_id)
macaroon.add_first_party_caveat("type = delete_pusher")


@@ -83,7 +83,6 @@ class CasHandler:
# the SsoIdentityProvider protocol type.
self.idp_icon = None
self.idp_brand = None
self.unstable_idp_brand = None
self._sso_handler = hs.get_sso_handler()


@@ -120,11 +120,6 @@ class DeactivateAccountHandler(BaseHandler):
await self.store.user_set_password_hash(user_id, None)
# Most of the pushers will have been deleted when we logged out the
# associated devices above, but we still need to delete pushers not
# associated with devices, e.g. email pushers.
await self.store.delete_all_pushers_for_user(user_id)
# Add the user to a table of users pending deactivation (ie.
# removal from all the rooms they're a member of)
await self.store.add_user_pending_deactivation(user_id)
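The comments above capture an ordering subtlety: logging out a user's devices removes their device-bound (HTTP) pushers as a side effect, but email pushers have no device, so a final sweep is still required. A toy illustration with a hypothetical in-memory store (not Synapse's datastore API):

    import asyncio

    class FakeStore:
        # Each pusher is (user_id, device_id, kind); email pushers have no device.
        def __init__(self):
            self.pushers = {("@alice:example.org", "DEV1", "http"),
                            ("@alice:example.org", None, "email")}

        async def delete_device_pushers(self, user_id, device_id):
            self.pushers -= {p for p in self.pushers if p[:2] == (user_id, device_id)}

        async def delete_all_pushers_for_user(self, user_id):
            self.pushers -= {p for p in self.pushers if p[0] == user_id}

    async def deactivate(store, user_id, device_ids):
        # Device logout removes device-bound pushers as a side effect...
        for device_id in device_ids:
            await store.delete_device_pushers(user_id, device_id)
        # ...but device-less pushers (e.g. email) remain, hence the sweep.
        await store.delete_all_pushers_for_user(user_id)

    store = FakeStore()
    asyncio.run(deactivate(store, "@alice:example.org", ["DEV1"]))
    print(store.pushers)  # set() — the email pusher is gone too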


@@ -16,9 +16,7 @@
import logging
from typing import TYPE_CHECKING, Any, Dict
from synapse.api.constants import EduTypes
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
get_active_span_text_map,
@@ -27,7 +25,7 @@ from synapse.logging.opentracing import (
start_active_span,
)
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import JsonDict, Requester, UserID, get_domain_from_id
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util import json_encoder
from synapse.util.stringutils import random_string
@@ -80,12 +78,6 @@ class DeviceMessageHandler:
ReplicationUserDevicesResyncRestServlet.make_client(hs)
)
self._ratelimiter = Ratelimiter(
clock=hs.get_clock(),
rate_hz=hs.config.rc_key_requests.per_second,
burst_count=hs.config.rc_key_requests.burst_count,
)
async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None:
local_messages = {}
sender_user_id = content["sender"]
@@ -176,27 +168,15 @@ class DeviceMessageHandler:
async def send_device_message(
self,
requester: Requester,
sender_user_id: str,
message_type: str,
messages: Dict[str, Dict[str, JsonDict]],
) -> None:
sender_user_id = requester.user.to_string()
set_tag("number_of_messages", len(messages))
set_tag("sender", sender_user_id)
local_messages = {}
remote_messages = {} # type: Dict[str, Dict[str, Dict[str, JsonDict]]]
for user_id, by_device in messages.items():
# Ratelimit local cross-user key requests by the sending device.
if (
message_type == EduTypes.RoomKeyRequest
and user_id != sender_user_id
and self._ratelimiter.can_do_action(
(sender_user_id, requester.device_id)
)
):
continue
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
messages_by_device = {
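The Ratelimiter wiring above (rate_hz requests per second with a burst_count allowance, keyed on the sending user and device) silently skips over-limit cross-user m.room_key_request messages rather than returning an error. A minimal token-bucket sketch of that shape — an illustration, not Synapse's actual Ratelimiter class:

    import time
    from collections import defaultdict

    class Ratelimiter:
        def __init__(self, rate_hz: float, burst_count: int):
            self.rate_hz = rate_hz
            self.burst_count = burst_count
            # Each key starts with a full bucket of burst_count tokens.
            self._buckets = defaultdict(lambda: (float(burst_count), time.monotonic()))

        def can_do_action(self, key) -> bool:
            tokens, last = self._buckets[key]
            now = time.monotonic()
            # Refill at rate_hz tokens per second, capped at burst_count.
            tokens = min(self.burst_count, tokens + (now - last) * self.rate_hz)
            if tokens < 1.0:
                self._buckets[key] = (tokens, now)
                return False
            self._buckets[key] = (tokens - 1.0, now)
            return True

    limiter = Ratelimiter(rate_hz=0.5, burst_count=2)
    key = ("@alice:example.org", "DEVICEID")
    print([limiter.can_do_action(key) for _ in range(4)])  # [True, True, False, False]

When the limiter trips, the handler above simply continues the loop, so over-limit key requests are dropped rather than rejected.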


@@ -17,7 +17,7 @@ import logging
import random
from typing import TYPE_CHECKING, Iterable, List, Optional
from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
@@ -113,7 +113,7 @@ class EventStreamHandler(BaseHandler):
states = await presence_handler.get_states(users)
to_add.extend(
{
"type": EduTypes.Presence,
"type": EventTypes.Presence,
"content": format_user_presence_state(state, time_now),
}
for state in states
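The one-word change above reflects that presence is an EDU rather than a room event, so the stream entry's type should come from EduTypes. Both constants appear to resolve to "m.presence" (my reading, not stated in the diff), making this a tidy-up rather than a wire-format change. A sketch of the resulting entry's shape, with content fields following the spec's m.presence EDU:

    import time

    def presence_stream_entry(user_id: str, state: str, last_active_ms: int, now_ms: int) -> dict:
        # "m.presence" is the value behind EduTypes.Presence; the content
        # fields below follow the spec's m.presence EDU (simplified).
        return {
            "type": "m.presence",
            "content": {
                "user_id": user_id,
                "presence": state,  # online / offline / unavailable
                "last_active_ago": now_ms - last_active_ms,
            },
        }

    now = int(time.time() * 1000)
    print(presence_stream_entry("@alice:example.org", "online", now - 5000, now))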

Some files were not shown because too many files have changed in this diff.