Compare commits
190 commits: travis/fos...v1.30.0
The author and date columns did not survive extraction; only the (abbreviated) commit SHA1s are recoverable:

| SHA1 |
|---|
| e2904f720d |
| 45ef73fd4f |
| e3bc0e6f7c |
| ad5d2e7ec0 |
| d315e96443 |
| 847ecdd8fa |
| ccf1dc51d7 |
| 1383508f29 |
| dd69110d95 |
| 5b5bc188cf |
| 1b0eaed21f |
| 1c8a2541da |
| f87dfb9403 |
| d29b71aa50 |
| 026503fa3b |
| af2248f8bf |
| 55da8df078 |
| 1e67bff833 |
| 2b328d7e02 |
| 464e5da7b2 |
| e55bd0e110 |
| 70d1b6abff |
| a7a3790066 |
| 1107214a1d |
| 17cd48fe51 |
| 2a99cc6524 |
| 918f6ed827 |
| 67b979bfa1 |
| dc51d8ffaf |
| e9df3f496b |
| eaada74075 |
| 9cd18cc588 |
| 7fdc6cefb3 |
| 075c16b410 |
| 3ce650057d |
| 576c91c7c1 |
| 22db45bd4d |
| 9898470e7d |
| 0764d0c6e5 |
| d6196efafc |
| b2c4d3d721 |
| 7076eee4b9 |
| cb7fc7523e |
| b988b07bb0 |
| 4de1c35728 |
| 15c788e22d |
| 58114f8a17 |
| 0fc4eb103a |
| e5da770cce |
| 8a4b3738f3 |
| df425c2c63 |
| 7eb6e39a8f |
| a6333b8d42 |
| ea0a3aaf0a |
| 3f49d80dcf |
| 33a02f0f52 |
| 4db07f9aef |
| a4fa044c00 |
| 922788c604 |
| d790d0d314 |
| 0c330423bc |
| 16f9f93eb7 |
| a5daae2a5f |
| 0279e0e086 |
| aee10768d8 |
| 7f5d753d06 |
| 16108c579d |
| f00c4e7af0 |
| ad8589d392 |
| 16ec8c3272 |
| a0bc9d387e |
| e12077a78a |
| ddb240293a |
| 15090de850 |
| e53f11bd62 |
| 2566dc57ce |
| 1e62d9ee8c |
| 1efdcc3e87 |
| 2756517f7a |
| 0f9f30b32b |
| b5c4fe1971 |
| d8e95e5452 |
| 00bf80cb8e |
| 7cc571510b |
| f5c93fc993 |
| 2927921942 |
| 0b5c967813 |
| 7292b7c0eb |
| 713145d3de |
| 65a9eb8994 |
| 66f4949e7f |
| 1b2d6d55c5 |
| 71c9f8de6d |
| 70ea9593ff |
| 0a363f9ca4 |
| e22b71810e |
| fc8b3d8809 |
| 179c0953ff |
| 3a2fe5054f |
| a1901abd6b |
| c4a55ac4a4 |
| d9f1dccba9 |
| d0365bc8b0 |
| b114a45f5f |
| 8bcfc2eaad |
| 13e9029f44 |
| 3d2acc930f |
| 9bc74743d5 |
| 1c5e715e5e |
| 1381cd05b0 |
| 2d577283ab |
| b106080fb4 |
| 84a7191410 |
| d804285139 |
| 9ee3b9775f |
| 90550f598e |
| 8ad4676f35 |
| 9d64e4dbd6 |
| e17553e185 |
| e8e7012265 |
| 8ec2217103 |
| bb2577f6b7 |
| 43f1c82457 |
| 626afd7e89 |
| c8d9383cfb |
| a25661b2eb |
| 3e5749b99f |
| 53f1c4da81 |
| a8878960c0 |
| 9e19c6aab4 |
| d2f0ec12d5 |
| e1071fd625 |
| 33f64ca7d6 |
| 0a00b7ff14 |
| 5636e597c3 |
| 3b754aea27 |
| 0ad087273c |
| 731e08c63a |
| ddfdf94506 |
| 6600f0bd57 |
| a27c1fd74b |
| 74af356baf |
| b8b172466f |
| ff40c8099d |
| 594f2853e0 |
| 7950aa8a27 |
| 2c9b4a5f16 |
| dcb9c2e8ae |
| 3f2f7efb87 |
| 40de534238 |
| e40d88cff3 |
| 6aa87f8ce3 |
| 8a33d217bd |
| 6dade80048 |
| 80d6dc9783 |
| fb0e14ee9a |
| 5f716fa777 |
| 29ae04af3b |
| 3f58fc848d |
| 0963d39ea6 |
| b0b2cac057 |
| d882fbca38 |
| 5a9cdaa6e9 |
| adc96d4236 |
| 7e8083eb48 |
| 982d9eb211 |
| 792263c97c |
| 2ab6e67ab7 |
| 2814028ce5 |
| b0f4119b8b |
| 3f534d3fdf |
| 17f2a512f3 |
| e288499c60 |
| afa18f1baa |
| ce669863b9 |
| 7a0dcea3e5 |
| f20dadb649 |
| e4cdecb310 |
| e1943d1353 |
| 4ca054a4ea |
| ff55300b91 |
| 96e460df2e |
| 31d072aea0 |
| 93f84e0373 |
| b755f60ce2 |
| a764869623 |
| b859919acc |
| de7f049527 |
| fe52dae6bd |
| 10332c175c |
.circleci/config.yml

````diff
@@ -14,7 +14,7 @@ jobs:
           platforms: linux/amd64
       - docker_build:
           tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
-          platforms: linux/amd64,linux/arm/v7,linux/arm64
+          platforms: linux/amd64,linux/arm64
 
   dockerhubuploadlatest:
     docker:
@@ -27,7 +27,7 @@ jobs:
       # until all of the platforms are built.
       - docker_build:
           tag: -t matrixdotorg/synapse:latest
-          platforms: linux/amd64,linux/arm/v7,linux/arm64
+          platforms: linux/amd64,linux/arm64
 
 workflows:
   build:
````
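Both hunks drop `linux/arm/v7` from the image builds. For reference, a rough standalone sketch of what the updated `docker_build` step amounts to (the Dockerfile path, tag, and `--push` flag here are assumptions, not taken from the CI config):

```sh
# Multi-platform build roughly equivalent to the updated docker_build step.
# Requires Docker Buildx with QEMU emulation for the non-native platform.
docker buildx build \
    --platform linux/amd64,linux/arm64 \
    --tag matrixdotorg/synapse:latest \
    --file docker/Dockerfile \
    --push .
```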
.git-blame-ignore-revs (new file, 8 lines)

````diff
@@ -0,0 +1,8 @@
+# Black reformatting (#5482).
+32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
+
+# Target Python 3.5 with black (#8664).
+aff1eb7c671b0a3813407321d2702ec46c71fa56
+
+# Update black to 20.8b1 (#9381).
+0a00b7ff14890987f09112a2ae696c61001e6cf1
````
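The new file only takes effect once git (2.23 or later) is pointed at it, either persistently or per invocation:

```sh
# Skip the listed commits for every `git blame` in this clone:
git config blame.ignoreRevsFile .git-blame-ignore-revs

# Or skip them for a single invocation:
git blame --ignore-revs-file .git-blame-ignore-revs CHANGES.md
```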
.gitignore (vendored, 3 changes)

````diff
@@ -6,13 +6,14 @@
 *.egg
 *.egg-info
 *.lock
-*.pyc
+*.py[cod]
 *.snap
 *.tac
 _trial_temp/
 _trial_temp*/
 /out
 .DS_Store
+__pycache__/
 
 # stuff that is likely to exist when you run a server locally
 /*.db
````
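The switch from `*.pyc` to `*.py[cod]` also matches `.pyo` and `.pyd` files, and `__pycache__/` covers bytecode directories wholesale. `git check-ignore` confirms which rule matches (the paths here are illustrative):

```sh
# -v prints the .gitignore rule that matched each path.
git check-ignore -v foo.pyo __pycache__/bar.pyc
```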
CHANGES.md (283 changes)

````diff
@@ -1,13 +1,290 @@
+Synapse 1.30.0 (2021-03-22)
+===========================
+
+Note that this release deprecates the ability for appservices to
+call `POST /_matrix/client/r0/register` without the body parameter `type`. Appservice
+developers should use a `type` value of `m.login.application_service` as
+per [the spec](https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions).
+In future releases, calling this endpoint with an access token - but without a `m.login.application_service`
+type - will fail.
+
+
+No significant changes.
+
+
+Synapse 1.30.0rc1 (2021-03-16)
+==============================
+
+Features
+--------
+
+- Add prometheus metrics for number of users successfully registering and logging in. ([\#9510](https://github.com/matrix-org/synapse/issues/9510), [\#9511](https://github.com/matrix-org/synapse/issues/9511), [\#9573](https://github.com/matrix-org/synapse/issues/9573))
+- Add `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time` prometheus metrics, which monitor federation delays by reporting the timestamps of messages sent and received to a set of remote servers. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
+- Add support for generating JSON Web Tokens dynamically for use as OIDC client secrets. ([\#9549](https://github.com/matrix-org/synapse/issues/9549))
+- Optimise handling of incomplete room history for incoming federation. ([\#9601](https://github.com/matrix-org/synapse/issues/9601))
+- Finalise support for allowing clients to pick an SSO Identity Provider ([MSC2858](https://github.com/matrix-org/matrix-doc/pull/2858)). ([\#9617](https://github.com/matrix-org/synapse/issues/9617))
+- Tell spam checker modules about the SSO IdP a user registered through if one was used. ([\#9626](https://github.com/matrix-org/synapse/issues/9626))
+
+
+Bugfixes
+--------
+
+- Fix long-standing bug when generating thumbnails for some images with transparency: `TypeError: cannot unpack non-iterable int object`. ([\#9473](https://github.com/matrix-org/synapse/issues/9473))
+- Purge chain cover indexes for events that were purged prior to Synapse v1.29.0. ([\#9542](https://github.com/matrix-org/synapse/issues/9542), [\#9583](https://github.com/matrix-org/synapse/issues/9583))
+- Fix bug where federation requests were not correctly retried on 5xx responses. ([\#9567](https://github.com/matrix-org/synapse/issues/9567))
+- Fix re-activating an account via the admin API when local passwords are disabled. ([\#9587](https://github.com/matrix-org/synapse/issues/9587))
+- Fix a bug introduced in Synapse 1.20 which caused incoming federation transactions to stack up, causing slow recovery from outages. ([\#9597](https://github.com/matrix-org/synapse/issues/9597))
+- Fix a bug introduced in v1.28.0 where the OpenID Connect callback endpoint could error with a `MacaroonInitException`. ([\#9620](https://github.com/matrix-org/synapse/issues/9620))
+- Fix Internal Server Error on `GET /_synapse/client/saml2/authn_response` request. ([\#9623](https://github.com/matrix-org/synapse/issues/9623))
+
+
+Updates to the Docker image
+---------------------------
+
+- Make use of an improved malloc implementation (`jemalloc`) in the docker image. ([\#8553](https://github.com/matrix-org/synapse/issues/8553))
+
+
+Improved Documentation
+----------------------
+
+- Add relayd entry to reverse proxy example configurations. ([\#9508](https://github.com/matrix-org/synapse/issues/9508))
+- Improve the SAML2 upgrade notes for 1.27.0. ([\#9550](https://github.com/matrix-org/synapse/issues/9550))
+- Link to the "List user's media" admin API from the media admin API docs. ([\#9571](https://github.com/matrix-org/synapse/issues/9571))
+- Clarify the spam checker modules documentation example to mention that `parse_config` is a required method. ([\#9580](https://github.com/matrix-org/synapse/issues/9580))
+- Clarify the sample configuration for `stats` settings. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
+
+
+Deprecations and Removals
+-------------------------
+
+- The `synapse_federation_last_sent_pdu_age` and `synapse_federation_last_received_pdu_age` prometheus metrics have been removed. They are replaced by `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time`. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
+- Registering an Application Service user without using the `m.login.application_service` login type will be unsupported in an upcoming Synapse release. ([\#9559](https://github.com/matrix-org/synapse/issues/9559))
+
+
+Internal Changes
+----------------
+
+- Add tests to ResponseCache. ([\#9458](https://github.com/matrix-org/synapse/issues/9458))
+- Add type hints to purge room and server notice admin API. ([\#9520](https://github.com/matrix-org/synapse/issues/9520))
+- Add extra logging to ObservableDeferred when callbacks throw exceptions. ([\#9523](https://github.com/matrix-org/synapse/issues/9523))
+- Fix incorrect type hints. ([\#9528](https://github.com/matrix-org/synapse/issues/9528), [\#9543](https://github.com/matrix-org/synapse/issues/9543), [\#9591](https://github.com/matrix-org/synapse/issues/9591), [\#9608](https://github.com/matrix-org/synapse/issues/9608), [\#9618](https://github.com/matrix-org/synapse/issues/9618))
+- Add an additional test for purging a room. ([\#9541](https://github.com/matrix-org/synapse/issues/9541))
+- Add a `.git-blame-ignore-revs` file with the hashes of auto-formatting. ([\#9560](https://github.com/matrix-org/synapse/issues/9560))
+- Increase the threshold before which outbound federation to a server goes into "catch up" mode, which is expensive for the remote server to handle. ([\#9561](https://github.com/matrix-org/synapse/issues/9561))
+- Fix spurious errors reported by the `config-lint.sh` script. ([\#9562](https://github.com/matrix-org/synapse/issues/9562))
+- Fix type hints and tests for BlacklistingAgentWrapper and BlacklistingReactorWrapper. ([\#9563](https://github.com/matrix-org/synapse/issues/9563))
+- Do not have mypy ignore type hints from unpaddedbase64. ([\#9568](https://github.com/matrix-org/synapse/issues/9568))
+- Improve efficiency of calculating the auth chain in large rooms. ([\#9576](https://github.com/matrix-org/synapse/issues/9576))
+- Convert `synapse.types.Requester` to an `attrs` class. ([\#9586](https://github.com/matrix-org/synapse/issues/9586))
+- Add logging for redis connection setup. ([\#9590](https://github.com/matrix-org/synapse/issues/9590))
+- Improve logging when processing incoming transactions. ([\#9596](https://github.com/matrix-org/synapse/issues/9596))
+- Remove unused `stats.retention` setting, and emit a warning if stats are disabled. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
+- Prevent attempting to bundle aggregations for state events in /context APIs. ([\#9619](https://github.com/matrix-org/synapse/issues/9619))
+
+
+Synapse 1.29.0 (2021-03-08)
+===========================
+
+Note that synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see [UPGRADE.rst](UPGRADE.rst#upgrading-to-v1290) for more details on this change.
+
+
+No significant changes.
+
+
+Synapse 1.29.0rc1 (2021-03-04)
+==============================
+
+Features
+--------
+
+- Add rate limiters to cross-user key sharing requests. ([\#8957](https://github.com/matrix-org/synapse/issues/8957))
+- Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel. ([\#8978](https://github.com/matrix-org/synapse/issues/8978))
+- Add some configuration settings to make users' profile data more private. ([\#9203](https://github.com/matrix-org/synapse/issues/9203))
+- The `no_proxy` and `NO_PROXY` environment variables are now respected in proxied HTTP clients with the lowercase form taking precedence if both are present. Additionally, the lowercase `https_proxy` environment variable is now respected in proxied HTTP clients on top of existing support for the uppercase `HTTPS_PROXY` form and takes precedence if both are present. Contributed by Timothy Leung. ([\#9372](https://github.com/matrix-org/synapse/issues/9372))
+- Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users. ([\#9383](https://github.com/matrix-org/synapse/issues/9383), [\#9385](https://github.com/matrix-org/synapse/issues/9385))
+- Add support for regenerating thumbnails if they have been deleted but the original image is still stored. ([\#9438](https://github.com/matrix-org/synapse/issues/9438))
+- Add support for `X-Forwarded-Proto` header when using a reverse proxy. ([\#9472](https://github.com/matrix-org/synapse/issues/9472), [\#9501](https://github.com/matrix-org/synapse/issues/9501), [\#9512](https://github.com/matrix-org/synapse/issues/9512), [\#9539](https://github.com/matrix-org/synapse/issues/9539))
+
+
+Bugfixes
+--------
+
+- Fix a bug where users' pushers were not all deleted when they deactivated their account. ([\#9285](https://github.com/matrix-org/synapse/issues/9285), [\#9516](https://github.com/matrix-org/synapse/issues/9516))
+- Fix a bug where a lot of unnecessary presence updates were sent when joining a room. ([\#9402](https://github.com/matrix-org/synapse/issues/9402))
+- Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results. ([\#9416](https://github.com/matrix-org/synapse/issues/9416))
+- Fix a bug in single sign-on which could cause a "No session cookie found" error. ([\#9436](https://github.com/matrix-org/synapse/issues/9436))
+- Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined. ([\#9440](https://github.com/matrix-org/synapse/issues/9440))
+- Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`. ([\#9449](https://github.com/matrix-org/synapse/issues/9449))
+- Fix deleting pushers when using sharded pushers. ([\#9465](https://github.com/matrix-org/synapse/issues/9465), [\#9466](https://github.com/matrix-org/synapse/issues/9466), [\#9479](https://github.com/matrix-org/synapse/issues/9479), [\#9536](https://github.com/matrix-org/synapse/issues/9536))
+- Fix missing startup checks for the consistency of certain PostgreSQL sequences. ([\#9470](https://github.com/matrix-org/synapse/issues/9470))
+- Fix a long-standing bug where the media repository could leak file descriptors while previewing media. ([\#9497](https://github.com/matrix-org/synapse/issues/9497))
+- Properly purge the event chain cover index when purging history. ([\#9498](https://github.com/matrix-org/synapse/issues/9498))
+- Fix missing chain cover index due to a schema delta not being applied correctly. Only affected servers that ran development versions. ([\#9503](https://github.com/matrix-org/synapse/issues/9503))
+- Fix a bug introduced in v1.25.0 where `/_synapse/admin/join/` would fail when given a room alias. ([\#9506](https://github.com/matrix-org/synapse/issues/9506))
+- Prevent presence background jobs from running when presence is disabled. ([\#9530](https://github.com/matrix-org/synapse/issues/9530))
+- Fix rare edge case that caused a background update to fail if the server had rejected an event that had duplicate auth events. ([\#9537](https://github.com/matrix-org/synapse/issues/9537))
+
+
+Improved Documentation
+----------------------
+
+- Update the example systemd config to propagate reloads to individual units. ([\#9463](https://github.com/matrix-org/synapse/issues/9463))
+
+
+Internal Changes
+----------------
+
+- Add documentation and type hints to `parse_duration`. ([\#9432](https://github.com/matrix-org/synapse/issues/9432))
+- Remove vestiges of `uploads_path` configuration setting. ([\#9462](https://github.com/matrix-org/synapse/issues/9462))
+- Add a comment about systemd-python. ([\#9464](https://github.com/matrix-org/synapse/issues/9464))
+- Test that we require validated email for email pushers. ([\#9496](https://github.com/matrix-org/synapse/issues/9496))
+- Allow python to generate bytecode for synapse. ([\#9502](https://github.com/matrix-org/synapse/issues/9502))
+- Fix incorrect type hints. ([\#9515](https://github.com/matrix-org/synapse/issues/9515), [\#9518](https://github.com/matrix-org/synapse/issues/9518))
+- Add type hints to device and event report admin API. ([\#9519](https://github.com/matrix-org/synapse/issues/9519))
+- Add type hints to user admin API. ([\#9521](https://github.com/matrix-org/synapse/issues/9521))
+- Bump the versions of mypy and mypy-zope used for static type checking. ([\#9529](https://github.com/matrix-org/synapse/issues/9529))
+
+
+Synapse 1.28.0 (2021-02-25)
+===========================
+
+Note that this release drops support for ARMv7 in the official Docker images, due to repeated problems building for ARMv7 (and the associated maintenance burden this entails).
+
+This release also fixes the documentation included in v1.27.0 around the callback URI for SAML2 identity providers. If your server is configured to use single sign-on via a SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+
+
+Internal Changes
+----------------
+
+- Revert change in v1.28.0rc1 to remove the deprecated SAML endpoint. ([\#9474](https://github.com/matrix-org/synapse/issues/9474))
+
+
+Synapse 1.28.0rc1 (2021-02-19)
+==============================
+
+Removal warning
+---------------
+
+The v1 list accounts API is deprecated and will be removed in a future release.
+This API was undocumented and misleading. It can be replaced by the
+[v2 list accounts API](https://github.com/matrix-org/synapse/blob/release-v1.28.0/docs/admin_api/user_admin_api.rst#list-accounts),
+which has been available since Synapse 1.7.0 (2019-12-13).
+
+Please check if you're using any scripts which use the admin API and replace
+`GET /_synapse/admin/v1/users/<user_id>` with `GET /_synapse/admin/v2/users`.
+
+
+Features
+--------
+
+- New admin API to get the context of an event: `/_synapse/admin/rooms/{roomId}/context/{eventId}`. ([\#9150](https://github.com/matrix-org/synapse/issues/9150))
+- Further improvements to the user experience of registration via single sign-on. ([\#9300](https://github.com/matrix-org/synapse/issues/9300), [\#9301](https://github.com/matrix-org/synapse/issues/9301))
+- Add hook to spam checker modules that allow checking file uploads and remote downloads. ([\#9311](https://github.com/matrix-org/synapse/issues/9311))
+- Add support for receiving OpenID Connect authentication responses via form `POST`s rather than `GET`s. ([\#9376](https://github.com/matrix-org/synapse/issues/9376))
+- Add the shadow-banning status to the admin API for user info. ([\#9400](https://github.com/matrix-org/synapse/issues/9400))
+
+
+Bugfixes
+--------
+
+- Fix long-standing bug where sending email notifications would fail for rooms that the server had since left. ([\#9257](https://github.com/matrix-org/synapse/issues/9257))
+- Fix bug introduced in Synapse 1.27.0rc1 which meant the "session expired" error page during SSO registration was badly formatted. ([\#9296](https://github.com/matrix-org/synapse/issues/9296))
+- Assert a maximum length for some parameters for spec compliance. ([\#9321](https://github.com/matrix-org/synapse/issues/9321), [\#9393](https://github.com/matrix-org/synapse/issues/9393))
+- Fix additional errors when previewing URLs: "AttributeError 'NoneType' object has no attribute 'xpath'" and "ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.". ([\#9333](https://github.com/matrix-org/synapse/issues/9333))
+- Fix a bug causing Synapse to impose the wrong type constraints on fields when processing responses from appservices to `/_matrix/app/v1/thirdparty/user/{protocol}`. ([\#9361](https://github.com/matrix-org/synapse/issues/9361))
+- Fix bug where Synapse would occasionally stop reconnecting to Redis after the connection was lost. ([\#9391](https://github.com/matrix-org/synapse/issues/9391))
+- Fix a long-standing bug when upgrading a room: "TypeError: '>' not supported between instances of 'NoneType' and 'int'". ([\#9395](https://github.com/matrix-org/synapse/issues/9395))
+- Reduce the amount of memory used when generating the URL preview of a file that is larger than the `max_spider_size`. ([\#9421](https://github.com/matrix-org/synapse/issues/9421))
+- Fix a long-standing bug in the deduplication of old presence, resulting in no deduplication. ([\#9425](https://github.com/matrix-org/synapse/issues/9425))
+- The `ui_auth.session_timeout` config option can now be specified in terms of number of seconds/minutes/etc/. Contributed by Rishabh Arya. ([\#9426](https://github.com/matrix-org/synapse/issues/9426))
+- Fix a bug introduced in v1.27.0: "TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType." related to the user directory. ([\#9428](https://github.com/matrix-org/synapse/issues/9428))
+
+
+Updates to the Docker image
+---------------------------
+
+- Drop support for ARMv7 in Docker images. ([\#9433](https://github.com/matrix-org/synapse/issues/9433))
+
+
+Improved Documentation
+----------------------
+
+- Reorganize CHANGELOG.md. ([\#9281](https://github.com/matrix-org/synapse/issues/9281))
+- Add note to `auto_join_rooms` config option explaining existing rooms must be publicly joinable. ([\#9291](https://github.com/matrix-org/synapse/issues/9291))
+- Correct name of Synapse's service file in TURN howto. ([\#9308](https://github.com/matrix-org/synapse/issues/9308))
+- Fix the braces in the `oidc_providers` section of the sample config. ([\#9317](https://github.com/matrix-org/synapse/issues/9317))
+- Update installation instructions on Fedora. ([\#9322](https://github.com/matrix-org/synapse/issues/9322))
+- Add HTTP/2 support to the nginx example configuration. Contributed by David Vo. ([\#9390](https://github.com/matrix-org/synapse/issues/9390))
+- Update docs for using Gitea as OpenID provider. ([\#9404](https://github.com/matrix-org/synapse/issues/9404))
+- Document that pusher instances are shardable. ([\#9407](https://github.com/matrix-org/synapse/issues/9407))
+- Fix erroneous documentation from v1.27.0 about updating the SAML2 callback URL. ([\#9434](https://github.com/matrix-org/synapse/issues/9434))
+
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate old admin API `GET /_synapse/admin/v1/users/<user_id>`. ([\#9429](https://github.com/matrix-org/synapse/issues/9429))
+
+
+Internal Changes
+----------------
+
+- Fix 'object name reserved for internal use' errors with recent versions of SQLite. ([\#9003](https://github.com/matrix-org/synapse/issues/9003))
+- Add experimental support for running Synapse with PyPy. ([\#9123](https://github.com/matrix-org/synapse/issues/9123))
+- Deny access to additional IP addresses by default. ([\#9240](https://github.com/matrix-org/synapse/issues/9240))
+- Update the `Cursor` type hints to better match PEP 249. ([\#9299](https://github.com/matrix-org/synapse/issues/9299))
+- Add debug logging for SRV lookups. Contributed by @Bubu. ([\#9305](https://github.com/matrix-org/synapse/issues/9305))
+- Improve logging for OIDC login flow. ([\#9307](https://github.com/matrix-org/synapse/issues/9307))
+- Share the code for handling required attributes between the CAS and SAML handlers. ([\#9326](https://github.com/matrix-org/synapse/issues/9326))
+- Clean up the code to load the metadata for OpenID Connect identity providers. ([\#9362](https://github.com/matrix-org/synapse/issues/9362))
+- Convert tests to use `HomeserverTestCase`. ([\#9377](https://github.com/matrix-org/synapse/issues/9377), [\#9396](https://github.com/matrix-org/synapse/issues/9396))
+- Update the version of black used to 20.8b1. ([\#9381](https://github.com/matrix-org/synapse/issues/9381))
+- Allow OIDC config to override discovered values. ([\#9384](https://github.com/matrix-org/synapse/issues/9384))
+- Remove some dead code from the acceptance of room invites path. ([\#9394](https://github.com/matrix-org/synapse/issues/9394))
+- Clean up an unused method in the presence handler code. ([\#9408](https://github.com/matrix-org/synapse/issues/9408))
+
+
+Synapse 1.27.0 (2021-02-16)
+===========================
+
+Note that this release includes a change in Synapse to use Redis as a cache ─ as well as a pub/sub mechanism ─ if Redis support is enabled for workers. No action is needed by server administrators, and we do not expect resource usage of the Redis instance to change dramatically.
+
+This release also changes the callback URI for OpenID Connect (OIDC) and SAML2 identity providers. If your server is configured to use single sign-on via an OIDC/OAuth2 or SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+
+This release also changes escaping of variables in the HTML templates for SSO or email notifications. If you have customised these templates, please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+
+
+Bugfixes
+--------
+
+- Fix building Docker images for armv7. ([\#9405](https://github.com/matrix-org/synapse/issues/9405))
+
+
+Synapse 1.27.0rc2 (2021-02-11)
+==============================
+
+Features
+--------
+
+- Further improvements to the user experience of registration via single sign-on. ([\#9297](https://github.com/matrix-org/synapse/issues/9297))
+
+
+Bugfixes
+--------
+
+- Fix ratelimiting introduced in v1.27.0rc1 for invites to respect the `ratelimit` flag on application services. ([\#9302](https://github.com/matrix-org/synapse/issues/9302))
+- Do not automatically calculate `public_baseurl` since it can be wrong in some situations. Reverts behaviour introduced in v1.26.0. ([\#9313](https://github.com/matrix-org/synapse/issues/9313))
+
+
+Improved Documentation
+----------------------
+
+- Clarify the sample configuration for changes made to the template loading code. ([\#9310](https://github.com/matrix-org/synapse/issues/9310))
+
+
 Synapse 1.27.0rc1 (2021-02-02)
 ==============================
 
-Note that this release includes a change in Synapse to use Redis as a cache ─ as well as a pub/sub mechanism ─ if Redis support is enabled. No action is needed by server administrators, and we do not expect resource usage of the Redis instance to change dramatically.
-
-This release also changes the callback URI for OpenID Connect (OIDC) identity providers. If your server is configured to use single sign-on via an OIDC/OAuth2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
-
 
 Features
 --------
 
````
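The appservice deprecation called out in the 1.30.0 notes above means registration calls should carry the `m.login.application_service` login type in the request body. A hypothetical request sketch (homeserver URL, token, and username are placeholders):

```sh
# Appservice user registration including the now-expected `type` parameter.
curl -X POST 'https://homeserver.example.com/_matrix/client/r0/register' \
    -H 'Authorization: Bearer <as_token>' \
    -H 'Content-Type: application/json' \
    -d '{"type": "m.login.application_service", "username": "_example_bot"}'
```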
CONTRIBUTING.md (271 changes)

````diff
@@ -1,4 +1,31 @@
-# Contributing code to Synapse
+Welcome to Synapse
+
+This document aims to get you started with contributing to this repo!
+
+- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
+- [2. What do I need?](#2-what-do-i-need)
+- [3. Get the source.](#3-get-the-source)
+- [4. Install the dependencies](#4-install-the-dependencies)
+  * [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
+  * [Under Windows](#under-windows)
+- [5. Get in touch.](#5-get-in-touch)
+- [6. Pick an issue.](#6-pick-an-issue)
+- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
+- [8. Test, test, test!](#8-test-test-test)
+  * [Run the linters.](#run-the-linters)
+  * [Run the unit tests.](#run-the-unit-tests)
+  * [Run the integration tests.](#run-the-integration-tests)
+- [9. Submit your patch.](#9-submit-your-patch)
+  * [Changelog](#changelog)
+    + [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
+    + [Debian changelog](#debian-changelog)
+  * [Sign off](#sign-off)
+- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
+- [11. Find a new issue.](#11-find-a-new-issue)
+- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
+- [Conclusion](#conclusion)
+
+# 1. Who can contribute to Synapse?
 
 Everyone is welcome to contribute code to [matrix.org
 projects](https://github.com/matrix-org), provided that they are willing to
@@ -9,70 +36,179 @@ license the code under the same terms as the project's overall 'outbound'
 license - in our case, this is almost always Apache Software License v2 (see
 [LICENSE](LICENSE)).
 
-## How to contribute
-
-The preferred and easiest way to contribute changes is to fork the relevant
-project on github, and then [create a pull request](
-https://help.github.com/articles/using-pull-requests/) to ask us to pull your
-changes into our repo.
-
-Some other points to follow:
-
- * Please base your changes on the `develop` branch.
-
- * Please follow the [code style requirements](#code-style).
-
- * Please include a [changelog entry](#changelog) with each PR.
-
- * Please [sign off](#sign-off) your contribution.
-
- * Please keep an eye on the pull request for feedback from the [continuous
-   integration system](#continuous-integration-and-testing) and try to fix any
-   errors that come up.
-
- * If you need to [update your PR](#updating-your-pull-request), just add new
-   commits to your branch rather than rebasing.
-
-## Code style
-
-Synapse's code style is documented [here](docs/code_style.md). Please follow
-it, including the conventions for the [sample configuration
-file](docs/code_style.md#configuration-file-format).
-
-Many of the conventions are enforced by scripts which are run as part of the
-[continuous integration system](#continuous-integration-and-testing). To help
-check if you have followed the code style, you can run `scripts-dev/lint.sh`
-locally. You'll need python 3.6 or later, and to install a number of tools:
-
-```
-# Install the dependencies
-pip install -e ".[lint,mypy]"
-
-# Run the linter script
-./scripts-dev/lint.sh
-```
-
-**Note that the script does not just test/check, but also reformats code, so you
-may wish to ensure any new code is committed first**.
-
-By default, this script checks all files and can take some time; if you alter
-only certain files, you might wish to specify paths as arguments to reduce the
-run-time:
-
-```
-./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
-```
-
-You can also provide the `-d` option, which will lint the files that have been
-changed since the last git commit. This will often be significantly faster than
-linting the whole codebase.
-
-Before pushing new changes, ensure they don't produce linting errors. Commit any
-files that were corrected.
-
-Please ensure your changes match the cosmetic style of the existing project,
-and **never** mix cosmetic and functional changes in the same commit, as it
-makes it horribly hard to review otherwise.
+# 2. What do I need?
+
+The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
+
+The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
+
+For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
+
+
+# 3. Get the source.
+
+The preferred and easiest way to contribute changes is to fork the relevant
+project on GitHub, and then [create a pull request](
+https://help.github.com/articles/using-pull-requests/) to ask us to pull your
+changes into our repo.
+
+Please base your changes on the `develop` branch.
+
+```sh
+git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
+git checkout develop
+```
+
+If you need help getting started with git, this is beyond the scope of the document, but you
+can find many good git tutorials on the web.
+
+
+# 4. Install the dependencies
+
+## Under Unix (macOS, Linux, BSD, ...)
+
+Once you have installed Python 3 and added the source, please open a terminal and
+setup a *virtualenv*, as follows:
+
+```sh
+cd path/where/you/have/cloned/the/repository
+python3 -m venv ./env
+source ./env/bin/activate
+pip install -e ".[all,lint,mypy,test]"
+pip install tox
+```
+
+This will install the developer dependencies for the project.
+
+
+## Under Windows
+
+TBD
+
+
+# 5. Get in touch.
+
+Join our developer community on Matrix: #synapse-dev:matrix.org !
+
+
+# 6. Pick an issue.
+
+Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
+to work on.
+
+
+# 7. Turn coffee and documentation into code and documentation!
+
+Synapse's code style is documented [here](docs/code_style.md). Please follow
+it, including the conventions for the [sample configuration
+file](docs/code_style.md#configuration-file-format).
+
+There is a growing amount of documentation located in the [docs](docs)
+directory. This documentation is intended primarily for sysadmins running their
+own Synapse instance, as well as developers interacting externally with
+Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
+Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
+regarding Synapse's Admin API, which is used mostly by sysadmins and external
+service developers.
+
+If you add new files added to either of these folders, please use [GitHub-Flavoured
+Markdown](https://guides.github.com/features/mastering-markdown/).
+
+Some documentation also exists in [Synapse's GitHub
+Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
+contributed to by community authors.
+
+
+# 8. Test, test, test!
+<a name="test-test-test"></a>
+
+While you're developing and before submitting a patch, you'll
+want to test your code.
+
+## Run the linters.
+
+The linters look at your code and do two things:
+
+- ensure that your code follows the coding style adopted by the project;
+- catch a number of errors in your code.
+
+They're pretty fast, don't hesitate!
+
+```sh
+source ./env/bin/activate
+./scripts-dev/lint.sh
+```
+
+Note that this script *will modify your files* to fix styling errors.
+Make sure that you have saved all your files.
+
+If you wish to restrict the linters to only the files changed since the last commit
+(much faster!), you can instead run:
+
+```sh
+source ./env/bin/activate
+./scripts-dev/lint.sh -d
+```
+
+Or if you know exactly which files you wish to lint, you can instead run:
+
+```sh
+source ./env/bin/activate
+./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
+```
+
+## Run the unit tests.
+
+The unit tests run parts of Synapse, including your changes, to see if anything
+was broken. They are slower than the linters but will typically catch more errors.
+
+```sh
+source ./env/bin/activate
+trial tests
+```
+
+If you wish to only run *some* unit tests, you may specify
+another module instead of `tests` - or a test class or a method:
+
+```sh
+source ./env/bin/activate
+trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
+```
+
+If your tests fail, you may wish to look at the logs:
+
+```sh
+less _trial_temp/test.log
+```
+
+## Run the integration tests.
+
+The integration tests are a more comprehensive suite of tests. They
+run a full version of Synapse, including your changes, to check if
+anything was broken. They are slower than the unit tests but will
+typically catch more errors.
+
+The following command will let you run the integration test with the most common
+configuration:
+
+```sh
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
+```
+
+This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
+
+
+# 9. Submit your patch.
+
+Once you're happy with your patch, it's time to prepare a Pull Request.
+
+To prepare a Pull Request, please:
+
+1. verify that [all the tests pass](#test-test-test), including the coding style;
+2. [sign off](#sign-off) your contribution;
+3. `git push` your commit to your fork of Synapse;
+4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
+5. add a [changelog entry](#changelog) and push it to your Pull Request;
+6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
 
 ## Changelog
 
@@ -156,24 +292,6 @@ directory, you will need both a regular newsfragment *and* an entry in the
 debian changelog. (Though typically such changes should be submitted as two
 separate pull requests.)
 
-## Documentation
-
-There is a growing amount of documentation located in the [docs](docs)
-directory. This documentation is intended primarily for sysadmins running their
-own Synapse instance, as well as developers interacting externally with
-Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
-Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
-regarding Synapse's Admin API, which is used mostly by sysadmins and external
-service developers.
-
-New files added to both folders should be written in [Github-Flavoured
-Markdown](https://guides.github.com/features/mastering-markdown/), and attempts
-should be made to migrate existing documents to markdown where possible.
-
-Some documentation also exists in [Synapse's Github
-Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
-contributed to by community authors.
-
 ## Sign off
 
 In order to have a concrete record that your contribution is intentional
@@ -240,47 +358,36 @@ Git allows you to add this signoff automatically when using the `-s`
 flag to `git commit`, which uses the name and email set in your
 `user.name` and `user.email` git configs.
 
-## Continuous integration and testing
-
-[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
-run a series of checks and tests against any PR which is opened against the
-project; if your change breaks the build, this will be shown in GitHub, with
-links to the build results. If your build fails, please try to fix the errors
-and update your branch.
-
-To run unit tests in a local development environment, you can use:
-
-- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
-  for SQLite-backed Synapse on Python 3.5.
-- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
-- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
-  (requires a running local PostgreSQL with access to create databases).
-- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
-  (requires Docker). Entirely self-contained, recommended if you don't want to
-  set up PostgreSQL yourself.
-
-Docker images are available for running the integration tests (SyTest) locally,
-see the [documentation in the SyTest repo](
-https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
-information.
-
-## Updating your pull request
-
-If you decide to make changes to your pull request - perhaps to address issues
-raised in a review, or to fix problems highlighted by [continuous
-integration](#continuous-integration-and-testing) - just add new commits to your
-branch, and push to GitHub. The pull request will automatically be updated.
-
-Please **avoid** rebasing your branch, especially once the PR has been
-reviewed: doing so makes it very difficult for a reviewer to see what has
-changed since a previous review.
+# 10. Turn feedback into better code.
+
+Once the Pull Request is opened, you will see a few things:
+
+1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
+2. one or more of the developers will take a look at your Pull Request and offer feedback.
+
+From this point, you should:
+
+1. Look at the results of the CI pipeline.
+  - If there is any error, fix the error.
+2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
+3. Create a new commit with the changes.
+  - Please do NOT overwrite the history. New commits make the reviewer's life easier.
+  - Push this commits to your Pull Request.
+4. Back to 1.
+
+Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
+
+
+# 11. Find a new issue.
+
+By now, you know the drill!
+
 
-## Notes for maintainers on merging PRs etc
+# Notes for maintainers on merging PRs etc
 
 There are some notes for those with commit access to the project on how we
 manage git [here](docs/dev/git.md).
 
-## Conclusion
+# Conclusion
 
 That's it! Matrix is a very open and collaborative project as you might expect
 given our obsession with open communication. If we're going to successfully
````
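As a concrete illustration of the changelog step referenced above: entries are newsfragment files in `changelog.d/`, named after the PR number and change type (the PR number 9999 and wording here are hypothetical):

```sh
# Types include feature, bugfix, docker, doc, removal and misc.
echo "Fix a broken link in the contributing guide." > changelog.d/9999.doc
```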
INSTALL.md (20 changes)

````diff
@@ -151,29 +151,15 @@ sudo pacman -S base-devel python python-pip \
 
 ##### CentOS/Fedora
 
-Installing prerequisites on CentOS 8 or Fedora>26:
+Installing prerequisites on CentOS or Fedora Linux:
 
 ```sh
 sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                 libwebp-devel tk-devel redhat-rpm-config \
-                 python3-virtualenv libffi-devel openssl-devel
+                 libwebp-devel libxml2-devel libxslt-devel libpq-devel \
+                 python3-virtualenv libffi-devel openssl-devel python3-devel
 sudo dnf groupinstall "Development Tools"
 ```
 
-Installing prerequisites on CentOS 7 or Fedora<=25:
-
-```sh
-sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                 lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
-                 python3-virtualenv libffi-devel openssl-devel
-sudo yum groupinstall "Development Tools"
-```
-
-Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
-uses SQLite 3.7. You may be able to work around this by installing a more
-recent SQLite version, but it is recommended that you instead use a Postgres
-database: see [docs/postgres.md](docs/postgres.md).
-
 ##### macOS
 
 Installing prerequisites on macOS:
````
MANIFEST.in

````diff
@@ -20,9 +20,10 @@ recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
-include tests/http/ca.crt
-include tests/http/ca.key
-include tests/http/server.key
+recursive-include tests *.pem
+recursive-include tests *.p8
+recursive-include tests *.crt
+recursive-include tests *.key
 
 recursive-include synapse/res *
 recursive-include synapse/static *.css
````
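One way to check that the switch from explicit `include` lines to the broader `recursive-include` patterns still packages the test fixtures is to build an sdist and inspect it (the tarball name depends on the version being built):

```sh
python setup.py sdist
tar -tzf dist/matrix-synapse-*.tar.gz | grep -E 'tests/.*\.(pem|p8|crt|key)$'
```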
README.rst

````diff
@@ -183,8 +183,9 @@ Using a reverse proxy with Synapse
 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
 `Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
-`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
-`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
+`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
+`HAProxy <https://www.haproxy.org/>`_ or
+`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.
````
UPGRADE.rst (50 changes)

````diff
@@ -85,23 +85,51 @@ for example:
    wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.29.0
+====================
+
+Requirement for X-Forwarded-Proto header
+----------------------------------------
+
+When using Synapse with a reverse proxy (in particular, when using the
+`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
+`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
+will log a warning on each received request.
+
+To avoid the warning, administrators using a reverse proxy should ensure that
+the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
+indicate the protocol used by the client. See the `reverse proxy documentation
+<docs/reverse_proxy.md>`_, where the example configurations have been updated to
+show how to set this header.
+
+(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
+sets `X-Forwarded-Proto` by default.)
+
 Upgrading to v1.27.0
 ====================
 
-Changes to callback URI for OAuth2 / OpenID Connect
----------------------------------------------------
+Changes to callback URI for OAuth2 / OpenID Connect and SAML2
+-------------------------------------------------------------
 
-This version changes the URI used for callbacks from OAuth2 identity providers. If
-your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
-provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
-to the list of permitted "redirect URIs" at the identity provider.
+This version changes the URI used for callbacks from OAuth2 and SAML2 identity providers:
 
-See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
-Connect.
+* If your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
+  provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
+  to the list of permitted "redirect URIs" at the identity provider.
 
-(Note: a similar change is being made for SAML2; in this case the old URI
-``[synapse public baseurl]/_matrix/saml2`` is being deprecated, but will continue to
-work, so no immediate changes are required for existing installations.)
+  See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
+  Connect.
+
+* If your server is configured for single sign-on via a SAML2 identity provider, you will
+  need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
+  "ACS location" (also known as "allowed callback URLs") at the identity provider.
+
+  The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
+  ``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
+  provider uses this property to validate or otherwise identify Synapse, its configuration
+  will need to be updated to use the new URL. Alternatively you could create a new, separate
+  "EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
+  the existing "EntityDescriptor" as they were.
 
 Changes to HTML templates
 -------------------------
````
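For the new `X-Forwarded-Proto` requirement above, a minimal sketch of the relevant nginx directives (not a complete server block; the full, authoritative examples live in the reverse proxy documentation referenced above):

```nginx
location ~ ^(/_matrix|/_synapse/client) {
    proxy_pass http://localhost:8008;
    # Tell Synapse which protocol the client actually used:
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Forwarded-For $remote_addr;
    proxy_set_header Host $host;
}
```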
The remaining hunks are docstring reformatting in the command-line client and example scripts (the per-file headers did not survive extraction):

````diff
@@ -92,7 +92,7 @@ class SynapseCmd(cmd.Cmd):
         return self.config["user"].split(":")[1]
 
     def do_config(self, line):
-        """ Show the config for this client: "config"
+        """Show the config for this client: "config"
         Edit a key value mapping: "config key value" e.g. "config token 1234"
         Config variables:
             user: The username to auth with.
@@ -360,7 +360,7 @@ class SynapseCmd(cmd.Cmd):
             print(e)
 
     def do_topic(self, line):
-        """"topic [set|get] <roomid> [<newtopic>]"
+        """ "topic [set|get] <roomid> [<newtopic>]"
         Set the topic for a room: topic set <roomid> <newtopic>
         Get the topic for a room: topic get <roomid>
         """
@@ -690,7 +690,7 @@ class SynapseCmd(cmd.Cmd):
         self._do_presence_state(2, line)
 
     def _parse(self, line, keys, force_keys=False):
-        """ Parses the given line.
+        """Parses the given line.
 
         Args:
             line : The line to parse
@@ -721,7 +721,7 @@ class SynapseCmd(cmd.Cmd):
         query_params={"access_token": None},
         alt_text=None,
     ):
-        """ Runs an HTTP request and pretty prints the output.
+        """Runs an HTTP request and pretty prints the output.
 
         Args:
             method: HTTP method
@@ -23,11 +23,10 @@ from twisted.web.http_headers import Headers
 
 
 class HttpClient:
-    """ Interface for talking json over http
-    """
+    """Interface for talking json over http"""
 
     def put_json(self, url, data):
-        """ Sends the specifed json data using PUT
+        """Sends the specifed json data using PUT
 
         Args:
             url (str): The URL to PUT data to.
@@ -41,7 +40,7 @@ class HttpClient:
         pass
 
     def get_json(self, url, args=None):
-        """ Gets some json from the given host homeserver and path
+        """Gets some json from the given host homeserver and path
 
         Args:
             url (str): The URL to GET data from.
@@ -58,7 +57,7 @@ class HttpClient:
 
 
 class TwistedHttpClient(HttpClient):
-    """ Wrapper around the twisted HTTP client api.
+    """Wrapper around the twisted HTTP client api.
 
     Attributes:
         agent (twisted.web.client.Agent): The twisted Agent used to send the
@@ -87,8 +86,7 @@ class TwistedHttpClient(HttpClient):
         defer.returnValue(json.loads(body))
 
     def _create_put_request(self, url, json_data, headers_dict={}):
-        """ Wrapper of _create_request to issue a PUT request
-        """
+        """Wrapper of _create_request to issue a PUT request"""
 
         if "Content-Type" not in headers_dict:
             raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
@@ -98,8 +96,7 @@ class TwistedHttpClient(HttpClient):
         )
 
     def _create_get_request(self, url, headers_dict={}):
-        """ Wrapper of _create_request to issue a GET request
-        """
+        """Wrapper of _create_request to issue a GET request"""
         return self._create_request("GET", url, headers_dict=headers_dict)
 
     @defer.inlineCallbacks
@@ -127,8 +124,7 @@ class TwistedHttpClient(HttpClient):
 
     @defer.inlineCallbacks
     def _create_request(self, method, url, producer=None, headers_dict={}):
-        """ Creates and sends a request to the given url
-        """
+        """Creates and sends a request to the given url"""
         headers_dict["User-Agent"] = ["Synapse Cmd Client"]
 
         retries_left = 5
@@ -185,8 +181,7 @@ class _RawProducer:
 
 
 class _JsonProducer:
-    """ Used by the twisted http client to create the HTTP body from json
-    """
+    """Used by the twisted http client to create the HTTP body from json"""
 
     def __init__(self, jsn):
         self.data = jsn
@@ -63,8 +63,7 @@ class CursesStdIO:
         self.redraw()
 
     def redraw(self):
-        """ method for redisplaying lines
-        based on internal list of lines """
+        """method for redisplaying lines based on internal list of lines"""
 
         self.stdscr.clear()
         self.paintStatus(self.statusText)
@@ -56,7 +56,7 @@ def excpetion_errback(failure):
 
 
 class InputOutput:
-    """ This is responsible for basic I/O so that a user can interact with
+    """This is responsible for basic I/O so that a user can interact with
     the example app.
     """
 
@@ -68,8 +68,7 @@ class InputOutput:
         self.server = server
 
     def on_line(self, line):
-        """ This is where we process commands.
-        """
+        """This is where we process commands."""
 
         try:
             m = re.match(r"^join (\S+)$", line)
@@ -133,7 +132,7 @@ class IOLoggerHandler(logging.Handler):
 
 
 class Room:
-    """ Used to store (in memory) the current membership state of a room, and
+    """Used to store (in memory) the current membership state of a room, and
     which home servers we should send PDUs associated with the room to.
     """
 
@@ -148,8 +147,7 @@ class Room:
         self.have_got_metadata = False
 
     def add_participant(self, participant):
-        """ Someone has joined the room
-        """
+        """Someone has joined the room"""
         self.participants.add(participant)
         self.invited.discard(participant)
 
@@ -160,14 +158,13 @@ class Room:
         self.oldest_server = server
 
     def add_invited(self, invitee):
-        """ Someone has been invited to the room
-        """
+        """Someone has been invited to the room"""
         self.invited.add(invitee)
         self.servers.add(origin_from_ucid(invitee))
 
 
 class HomeServer(ReplicationHandler):
-    """ A very basic home server implentation that allows people to join a
+    """A very basic home server implentation that allows people to join a
     room and then invite other people.
     """
 
@@ -181,8 +178,7 @@ class HomeServer(ReplicationHandler):
         self.output = output
 
     def on_receive_pdu(self, pdu):
-        """ We just received a PDU
-        """
+        """We just received a PDU"""
         pdu_type = pdu.pdu_type
 
         if pdu_type == "sy.room.message":
@@ -199,23 +195,20 @@ class HomeServer(ReplicationHandler):
         )
 
     def _on_message(self, pdu):
-        """ We received a message
-        """
+        """We received a message"""
         self.output.print_line(
             "#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
         )
 
     def _on_join(self, context, joinee):
-        """ Someone has joined a room, either a remote user or a local user
-        """
+        """Someone has joined a room, either a remote user or a local user"""
         room = self._get_or_create_room(context)
         room.add_participant(joinee)
 
         self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
 
     def _on_invite(self, origin, context, invitee):
-        """ Someone has been invited
-        """
+        """Someone has been invited"""
         room = self._get_or_create_room(context)
         room.add_invited(invitee)
 
@@ -228,8 +221,7 @@ class HomeServer(ReplicationHandler):
 
     @defer.inlineCallbacks
     def send_message(self, room_name, sender, body):
-        """ Send a message to a room!
-        """
+        """Send a message to a room!"""
         destinations = yield self.get_servers_for_context(room_name)
 
         try:
@@ -247,8 +239,7 @@ class HomeServer(ReplicationHandler):
 
     @defer.inlineCallbacks
     def join_room(self, room_name, sender, joinee):
-        """ Join a room!
-        """
+        """Join a room!"""
         self._on_join(room_name, joinee)
 
         destinations = yield self.get_servers_for_context(room_name)
@@ -269,8 +260,7 @@ class HomeServer(ReplicationHandler):
 
     @defer.inlineCallbacks
     def invite_to_room(self, room_name, sender, invitee):
-        """ Invite someone to a room!
-        """
+        """Invite someone to a room!"""
````
|
||||
self._on_invite(self.server_name, room_name, invitee)
|
||||
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
@@ -193,15 +193,12 @@ class TrivialXmppClient:
|
||||
time.sleep(7)
|
||||
print("SSRC spammer started")
|
||||
while self.running:
|
||||
ssrcMsg = (
|
||||
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
|
||||
% {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
)
|
||||
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
res = self.sendIq(ssrcMsg)
|
||||
print("reply from ssrc announce: ", res)
|
||||
time.sleep(10)
|
||||
|
||||
debian/build_virtualenv (vendored)
@@ -58,10 +58,10 @@ trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"

PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests

# build the config file
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
@@ -87,7 +87,7 @@ PYTHONPATH="$tmpdir" \
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"

# build the log config file
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"

# add a dependency on the right version of python to substvars.

debian/changelog (vendored)
@@ -1,8 +1,34 @@
matrix-synapse-py3 (1.26.0+nmu1) UNRELEASED; urgency=medium
matrix-synapse-py3 (1.30.0) stable; urgency=medium

* New synapse release 1.30.0.

-- Synapse Packaging team <packages@matrix.org> Mon, 22 Mar 2021 13:15:34 +0000

matrix-synapse-py3 (1.29.0) stable; urgency=medium

[ Jonathan de Jong ]
* Remove the python -B flag (don't generate bytecode) in scripts and documentation.

[ Synapse Packaging team ]
* New synapse release 1.29.0.

-- Synapse Packaging team <packages@matrix.org> Mon, 08 Mar 2021 13:51:50 +0000

matrix-synapse-py3 (1.28.0) stable; urgency=medium

* New synapse release 1.28.0.

-- Synapse Packaging team <packages@matrix.org> Thu, 25 Feb 2021 10:21:57 +0000

matrix-synapse-py3 (1.27.0) stable; urgency=medium

[ Dan Callahan ]
* Fix build on Ubuntu 16.04 LTS (Xenial).

-- Dan Callahan <danc@element.io> Thu, 28 Jan 2021 16:21:03 +0000
[ Synapse Packaging team ]
* New synapse release 1.27.0.

-- Synapse Packaging team <packages@matrix.org> Tue, 16 Feb 2021 13:11:28 +0000

matrix-synapse-py3 (1.26.0) stable; urgency=medium


debian/synctl.1 (vendored)
@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
.
.nf

$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
.
.fi
.

debian/synctl.ronn (vendored)
@@ -41,7 +41,7 @@ process.

Configuration file may be generated as follows:

$ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
$ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>

## ENVIRONMENT


@@ -28,11 +28,13 @@ RUN apt-get update && apt-get install -y \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
rustc \
zlib1g-dev \
&& rm -rf /var/lib/apt/lists/*

# Build dependencies that are not available as wheels, to speed up rebuilds
RUN pip install --prefix="/install" --no-warn-script-location \
cryptography \
frozendict \
jaeger-client \
opentracing \
@@ -67,6 +69,7 @@ RUN apt-get update && apt-get install -y \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
&& rm -rf /var/lib/apt/lists/*

COPY --from=builder /install /usr/local

@@ -11,7 +11,6 @@ The image also does *not* provide a TURN server.
By default, the image expects a single volume, located at ``/data``, that will hold:

* configuration files;
* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.
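
A minimal sketch of mounting such a volume when starting the container (the image tag, host path, and published port here are illustrative assumptions, not taken from the surrounding text):

```
docker run -d --name synapse \
    -v /path/to/synapse/data:/data \
    -p 8008:8008 \
    matrixdotorg/synapse:latest
```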
@@ -205,3 +204,8 @@ healthcheck:
timeout: 10s
retries: 3
```

## Using jemalloc

Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse [README](../README.md)
@@ -89,7 +89,6 @@ federation_rc_concurrent: 3
## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
max_image_pixels: "32M"
dynamic_thumbnails: false

@@ -3,6 +3,7 @@
import codecs
import glob
import os
import platform
import subprocess
import sys

@@ -213,6 +214,13 @@ def main(args, environ):
if "-m" not in args:
args = ["-m", synapse_worker] + args

jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log("Could not find %s, will not use" % (jemallocpath,))

# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
@@ -248,9 +256,9 @@ running with 'migrate_config'. See the README for more details.
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execv("/usr/local/bin/python", args)
os.execve("/usr/local/bin/python", args, environ)


if __name__ == "__main__":

@@ -1,5 +1,7 @@
# Contents
- [List all media in a room](#list-all-media-in-a-room)
- [Querying media](#querying-media)
* [List all media in a room](#list-all-media-in-a-room)
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
@@ -10,7 +12,11 @@
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
- [Purge Remote Media API](#purge-remote-media-api)

# List all media in a room
# Querying media

These APIs allow extracting media information from the homeserver.

## List all media in a room

This API gets a list of known media in a room.
However, it only shows media from unencrypted events or rooms.
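
As a sketch, a request against it has this shape (the endpoint path is an assumption recalled from the Synapse admin API docs, since the excerpt above elides it; an admin access token is required):

```
GET /_synapse/admin/v1/room/<room_id>/media
```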
@@ -36,6 +42,12 @@ The API returns a JSON body like the following:
}
```

## List all media uploaded by a user

Listing all media that has been uploaded by a local user can be achieved through
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
Admin API.

# Quarantine media

Quarantining media means that it is marked as inaccessible by users. It applies

@@ -10,6 +10,7 @@
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
- [Event Context API](#event-context-api)

# List Room API

@@ -594,3 +595,121 @@ that were deleted.
"deleted": 1
}
```

# Event Context API

This API lets a client find the context of an event. This is designed primarily to investigate abuse reports.

```
GET /_synapse/admin/v1/rooms/<room_id>/context/<event_id>
```

This API mimics [GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-rooms-roomid-context-eventid). Please refer to the link for all details on parameters and response.
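
For instance, a hedged curl sketch (server name and token are placeholders; `limit` is assumed to behave as on the mimicked client-server endpoint, and room/event IDs must be URL-encoded):

```
curl --header "Authorization: Bearer <access_token>" \
    "https://matrix.example.com/_synapse/admin/v1/rooms/<room_id>/context/<event_id>?limit=10"
```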

Example response:

```json
{
"end": "t29-57_2_0_2",
"events_after": [
{
"content": {
"body": "This is an example text message",
"msgtype": "m.text",
"format": "org.matrix.custom.html",
"formatted_body": "<b>This is an example text message</b>"
},
"type": "m.room.message",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
}
}
],
"event": {
"content": {
"body": "filename.jpg",
"info": {
"h": 398,
"w": 394,
"mimetype": "image/jpeg",
"size": 31037
},
"url": "mxc://example.org/JWEIFJgwEIhweiWJE",
"msgtype": "m.image"
},
"type": "m.room.message",
"event_id": "$f3h4d129462ha:example.com",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
}
},
"events_before": [
{
"content": {
"body": "something-important.doc",
"filename": "something-important.doc",
"info": {
"mimetype": "application/msword",
"size": 46144
},
"msgtype": "m.file",
"url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe"
},
"type": "m.room.message",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
}
}
],
"start": "t27-54_2_0_2",
"state": [
{
"content": {
"creator": "@example:example.org",
"room_version": "1",
"m.federate": true,
"predecessor": {
"event_id": "$something:example.org",
"room_id": "!oldroom:example.org"
}
},
"type": "m.room.create",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
},
"state_key": ""
},
{
"content": {
"membership": "join",
"avatar_url": "mxc://example.org/SEsfnsuifSDFSSEF",
"displayname": "Alice Margatroid"
},
"type": "m.room.member",
"event_id": "$143273582443PhrSn:example.org",
"room_id": "!636q39766251:example.com",
"sender": "@example:example.org",
"origin_server_ts": 1432735824653,
"unsigned": {
"age": 1234
},
"state_key": "@alice:example.org"
}
]
}
```

@@ -29,8 +29,9 @@ It returns a JSON body like the following:
}
],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false,
"admin": 0,
"deactivated": 0,
"shadow_banned": 0,
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
"creation_ts": 1560432506,
"appservice_id": null,
@@ -150,6 +151,7 @@ A JSON body is returned with the following shape:
"admin": 0,
"user_type": null,
"deactivated": 0,
"shadow_banned": 0,
"displayname": "<User One>",
"avatar_url": null
}, {
@@ -158,6 +160,7 @@ A JSON body is returned with the following shape:
"admin": 1,
"user_type": null,
"deactivated": 0,
"shadow_banned": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>"
}
@@ -262,7 +265,7 @@ The following actions are performed when deactivating an user:
- Reject all pending invites
- Remove all account validity information related to the user

The following additional actions are performed during deactivation if``erase``
The following additional actions are performed during deactivation if ``erase``
is set to ``true``:

- Remove the user's display name
@@ -376,11 +379,12 @@ The following fields are returned in the JSON response body:
- ``total`` - Number of rooms.


List media of an user
================================
List media of a user
====================
Gets a list of all local media that a specific ``user_id`` has created.
The response is ordered by creation date descending and media ID descending.
The newest media is on top.
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
``order_by`` and ``dir``.

The API is::

@@ -437,6 +441,35 @@ The following parameters should be set in the URL:
denoting the offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
Defaults to ``0``.
- ``order_by`` - The method by which to sort the returned list of media.
If the ordered field has duplicates, the second order is always by ascending ``media_id``,
which guarantees a stable ordering. Valid values are:

- ``media_id`` - Media are ordered alphabetically by ``media_id``.
- ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with.
- ``created_ts`` - Media are ordered by when the content was uploaded in ms.
Smallest to largest. This is the default.
- ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
Smallest to largest.
- ``media_length`` - Media are ordered by length of the media in bytes.
Smallest to largest.
- ``media_type`` - Media are ordered alphabetically by MIME-type.
- ``quarantined_by`` - Media are ordered alphabetically by the user ID that
initiated the quarantine request for this media.
- ``safe_from_quarantine`` - Media are ordered by the status if this media is safe
from quarantining.

- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.

If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).

Caution. The database only has indexes on the columns ``media_id``,
``user_id`` and ``created_ts``. This means that if a different sort order is used
(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
database, especially for large environments.
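
As an illustration of how ``order_by`` and ``dir`` combine (the endpoint path and the ``limit`` parameter here are assumptions based on the surrounding pagination docs), fetching a user's ten largest media items would look like::

    GET /_synapse/admin/v1/users/<user_id>/media?order_by=media_length&dir=b&limit=10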

**Response**


@@ -8,16 +8,16 @@ errors in code.

The necessary tools are detailed below.

First install them with:

pip install -e ".[lint,mypy]"

- **black**

The Synapse codebase uses [black](https://pypi.org/project/black/)
as an opinionated code formatter, ensuring all comitted code is
properly formatted.

First install `black` with:

pip install --upgrade black

Have `black` auto-format your code (it shouldn't change any
functionality) with:

@@ -28,10 +28,6 @@ The necessary tools are detailed below.
`flake8` is a code checking tool. We require code to pass `flake8`
before being merged into the codebase.

Install `flake8` with:

pip install --upgrade flake8 flake8-comprehensions

Check all application and test code with:

flake8 synapse tests
@@ -41,10 +37,6 @@ The necessary tools are detailed below.
`isort` ensures imports are nicely formatted, and can suggest and
auto-fix issues such as double-importing.

Install `isort` with:

pip install --upgrade isort

Auto-fix imports with:

isort -rc synapse tests

@@ -226,7 +226,7 @@ Synapse config:
oidc_providers:
- idp_id: github
idp_name: Github
idp_brand: "org.matrix.github" # optional: styling hint for clients
idp_brand: "github" # optional: styling hint for clients
discover: false
issuer: "https://github.com/"
client_id: "your-client-id" # TO BE FILLED
@@ -252,7 +252,7 @@ oidc_providers:
oidc_providers:
- idp_id: google
idp_name: Google
idp_brand: "org.matrix.google" # optional: styling hint for clients
idp_brand: "google" # optional: styling hint for clients
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
@@ -299,7 +299,7 @@ Synapse config:
oidc_providers:
- idp_id: gitlab
idp_name: Gitlab
idp_brand: "org.matrix.gitlab" # optional: styling hint for clients
idp_brand: "gitlab" # optional: styling hint for clients
issuer: "https://gitlab.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
@@ -334,7 +334,7 @@ Synapse config:
```yaml
- idp_id: facebook
idp_name: Facebook
idp_brand: "org.matrix.facebook" # optional: styling hint for clients
idp_brand: "facebook" # optional: styling hint for clients
discover: false
issuer: "https://facebook.com"
client_id: "your-client-id" # TO BE FILLED
@@ -365,7 +365,7 @@ login mechanism needs an attribute to uniquely identify users, and that endpoint
does not return a `sub` property, an alternative `subject_claim` has to be set.

1. Create a new application.
2. Add this Callback URL: `[synapse public baseurl]/_synapse/oidc/callback`
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`

Synapse config:

@@ -386,5 +386,63 @@ oidc_providers:
config:
subject_claim: "id"
localpart_template: "{{ user.login }}"
display_name_template: "{{ user.full_name }}"
display_name_template: "{{ user.full_name }}"
```

### XWiki

Install [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.

Synapse config:

```yaml
oidc_providers:
- idp_id: xwiki
idp_name: "XWiki"
issuer: "https://myxwikihost/xwiki/oidc/"
client_id: "your-client-id" # TO BE FILLED
client_auth_method: none
scopes: ["openid", "profile"]
user_profile_method: "userinfo_endpoint"
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
```

## Apple

Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.

You will need to create a new "Services ID" for SiWA, and create and download a
private key with "SiWA" enabled.

As well as the private key file, you will need:
* Client ID: the "identifier" you gave the "Services ID"
* Team ID: a 10-character ID associated with your developer account.
* Key ID: the 10-character identifier for the key.

https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
documentation on setting up SiWA.

The synapse config will look like this:

```yaml
- idp_id: apple
idp_name: Apple
issuer: "https://appleid.apple.com"
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
client_auth_method: "client_secret_post"
client_secret_jwt_key:
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
jwt_header:
alg: ES256
kid: "KEYIDCODE" # Set to the 10-char Key ID
jwt_payload:
iss: TEAMIDCODE # Set to the 10-char Team ID
scopes: ["name", "email", "openid"]
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
user_mapping_provider:
config:
email_template: "{{ user.email }}"
```

@@ -3,30 +3,31 @@
It is recommended to put a reverse proxy such as
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
[HAProxy](https://www.haproxy.org/) or
[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
privileges.

You should configure your reverse proxy to forward requests to `/_matrix` or
`/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and
`X-Forwarded-Proto` request headers.

You should remember that Matrix clients and other Matrix servers do not
necessarily need to connect to your server via the same server name or
port. Indeed, clients will use port 443 by default, whereas servers default to
port 8448. Where these are different, we refer to the 'client port' and the
'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.

**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
the requested URI in any way (for example, by decoding `%xx` escapes).
Beware that Apache *will* canonicalise URIs unless you specify
`nocanon`.

When setting up a reverse proxy, remember that Matrix clients and other
Matrix servers do not necessarily need to connect to your server via the
same server name or port. Indeed, clients will use port 443 by default,
whereas servers default to port 8448. Where these are different, we
refer to the 'client port' and the 'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.

Endpoints that are part of the standardised Matrix specification are
located under `/_matrix`, whereas endpoints specific to Synapse are
located under `/_synapse/client`.

Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at
`https://example.com:8448`. The following sections detail the configuration of
@@ -40,18 +41,21 @@ the reverse proxy and the homeserver.

```
server {
listen 443 ssl;
listen [::]:443 ssl;
listen 443 ssl http2;
listen [::]:443 ssl http2;

# For the federation port
listen 8448 ssl default_server;
listen [::]:8448 ssl default_server;
listen 8448 ssl http2 default_server;
listen [::]:8448 ssl http2 default_server;

server_name matrix.example.com;

location ~* ^(\/_matrix|\/_synapse\/client) {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;

# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
@@ -102,6 +106,7 @@ example.com:8448 {
SSLEngine on
ServerName matrix.example.com;

RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
@@ -113,6 +118,7 @@ example.com:8448 {
SSLEngine on
ServerName example.com;

RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
@@ -134,6 +140,9 @@ example.com:8448 {
```
frontend https
bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]

# Matrix client traffic
acl matrix-host hdr(host) -i matrix.example.com
@@ -144,12 +153,62 @@ frontend https

frontend matrix-federation
bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-For %[src]

default_backend matrix

backend matrix
server matrix 127.0.0.1:8008
```

### Relayd

```
table <webserver> { 127.0.0.1 }
table <matrixserver> { 127.0.0.1 }

http protocol "https" {
tls { no tlsv1.0, ciphers "HIGH" }
tls keypair "example.com"
match header set "X-Forwarded-For" value "$REMOTE_ADDR"
match header set "X-Forwarded-Proto" value "https"

# set CORS header for .well-known/matrix/server, .well-known/matrix/client
# httpd does not support setting headers, so do it here
match request path "/.well-known/matrix/*" tag "matrix-cors"
match response tagged "matrix-cors" header set "Access-Control-Allow-Origin" value "*"

pass quick path "/_matrix/*" forward to <matrixserver>
pass quick path "/_synapse/client/*" forward to <matrixserver>

# pass on non-matrix traffic to webserver
pass forward to <webserver>
}

relay "https_traffic" {
listen on egress port 443 tls
protocol "https"
forward to <matrixserver> port 8008 check tcp
forward to <webserver> port 8080 check tcp
}

http protocol "matrix" {
tls { no tlsv1.0, ciphers "HIGH" }
tls keypair "example.com"
block
pass quick path "/_matrix/*" forward to <matrixserver>
pass quick path "/_synapse/client/*" forward to <matrixserver>
}

relay "matrix_federation" {
listen on egress port 8448 tls
protocol "matrix"
forward to <matrixserver> port 8008 check tcp
}
```

## Homeserver Configuration

You will also want to set `bind_addresses: ['127.0.0.1']` and

@@ -74,10 +74,6 @@ pid_file: DATADIR/homeserver.pid
# Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
# 'listeners' below).
#
# If this is left unset, it defaults to 'https://<server_name>/'. (Note that
# that will not work unless you configure Synapse or a reverse-proxy to listen
# on port 443.)
#
#public_baseurl: https://example.com/

# Set the soft limit on the number of file descriptors synapse can use
@@ -93,8 +89,7 @@ pid_file: DATADIR/homeserver.pid
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, so this setting is of limited value if federation is enabled on
# the server.
# API, unless allow_profile_lookup_over_federation is set to false.
#
#require_auth_for_profile_requests: true

@@ -105,6 +100,14 @@ pid_file: DATADIR/homeserver.pid
#
#limit_profile_requests_to_users_who_share_rooms: true

# Uncomment to prevent a user's profile data from being retrieved and
# displayed in a room until they have joined it. By default, a user's
# profile data is included in an invite event, regardless of the values
# of the above two settings, and whether or not the users share a server.
# Defaults to 'true'.
#
#include_profile_data_on_invite: false

# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
@@ -169,6 +172,7 @@ pid_file: DATADIR/homeserver.pid
# - '100.64.0.0/10'
# - '192.0.0.0/24'
# - '169.254.0.0/16'
# - '192.88.99.0/24'
# - '198.18.0.0/15'
# - '192.0.2.0/24'
# - '198.51.100.0/24'
@@ -177,6 +181,9 @@ pid_file: DATADIR/homeserver.pid
# - '::1/128'
# - 'fe80::/10'
# - 'fc00::/7'
# - '2001:db8::/32'
# - 'ff00::/8'
# - 'fec0::/10'

# List of IP address CIDR ranges that should be allowed for federation,
# identity servers, push servers, and for checking key validity for
@@ -699,6 +706,12 @@ acme:
# - matrix.org
# - example.com

# Uncomment to disable profile lookup over federation. By default, the
# Federation API allows other homeservers to obtain profile data of any user
# on this homeserver. Defaults to 'true'.
#
#allow_profile_lookup_over_federation: false


## Caching ##

@@ -994,6 +1007,7 @@ media_store_path: "DATADIR/media_store"
# - '100.64.0.0/10'
# - '192.0.0.0/24'
# - '169.254.0.0/16'
# - '192.88.99.0/24'
# - '198.18.0.0/15'
# - '192.0.2.0/24'
# - '198.51.100.0/24'
@@ -1002,6 +1016,9 @@ media_store_path: "DATADIR/media_store"
# - '::1/128'
# - 'fe80::/10'
# - 'fc00::/7'
# - '2001:db8::/32'
# - 'ff00::/8'
# - 'fec0::/10'

# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.
@@ -1169,9 +1186,8 @@ account_validity:
# send an email to the account's email address with a renewal link. By
# default, no such emails are sent.
#
# If you enable this setting, you will also need to fill out the 'email'
# configuration section. You should also check that 'public_baseurl' is set
# correctly.
# If you enable this setting, you will also need to fill out the 'email' and
# 'public_baseurl' configuration sections.
#
#renew_at: 1w

@@ -1262,7 +1278,8 @@ account_validity:
# The identity server which we suggest that clients should use when users log
# in on this server.
#
# (By default, no suggestion is made, so it is left up to the client.)
# (By default, no suggestion is made, so it is left up to the client.
# This setting is ignored unless public_baseurl is also set.)
#
#default_identity_server: https://matrix.org

@@ -1287,6 +1304,8 @@ account_validity:
# by the Matrix Identity Service API specification:
# https://matrix.org/docs/spec/identity_service/latest
#
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
account_threepid_delegates:
#email: https://example.com # Delegate email sending to example.com
#msisdn: http://localhost:8090 # Delegate SMS sending to this local process
@@ -1320,6 +1339,8 @@ account_threepid_delegates:
# By default, any room aliases included in this list will be created
# as a publicly joinable room when the first user registers for the
# homeserver. This behaviour can be customised with the settings below.
# If the room already exists, make certain it is a publicly joinable
# room. The join rule of the room must be set to 'public'.
#
#auto_join_rooms:
# - "#example:example.com"
@@ -1758,7 +1779,26 @@ saml2_config:
#
# client_id: Required. oauth2 client id to use.
#
# client_secret: Required. oauth2 client secret to use.
# client_secret: oauth2 client secret to use. May be omitted if
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
# client_secret_jwt_key: Alternative to client_secret: details of a key used
# to create a JSON Web Token to be used as an OAuth2 client secret. If
# given, must be a dictionary with the following properties:
#
# key: a pem-encoded signing key. Must be a suitable key for the
# algorithm specified. Required unless 'key_file' is given.
#
# key_file: the path to file containing a pem-encoded signing key file.
# Required unless 'key' is given.
#
# jwt_header: a dictionary giving properties to include in the JWT
# header. Must include the key 'alg', giving the algorithm used to
# sign the JWT, such as "ES256", using the JWA identifiers in
# RFC7518.
#
# jwt_payload: an optional dictionary giving properties to include in
# the JWT payload. Normally this should include an 'iss' key.
#
# client_auth_method: auth method to use when exchanging the token. Valid
# values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -1862,9 +1902,9 @@ oidc_providers:
# user_mapping_provider:
# config:
# subject_claim: "id"
# localpart_template: "{ user.login }"
# display_name_template: "{ user.name }"
# email_template: "{ user.email }"
# localpart_template: "{{ user.login }}"
# display_name_template: "{{ user.name }}"
# email_template: "{{ user.email }}"

# For use with Keycloak
#
@@ -1879,7 +1919,7 @@ oidc_providers:
#
#- idp_id: github
# idp_name: Github
# idp_brand: org.matrix.github
# idp_brand: github
# discover: false
# issuer: "https://github.com/"
# client_id: "your-client-id" # TO BE FILLED
@@ -1891,8 +1931,8 @@ oidc_providers:
# user_mapping_provider:
# config:
# subject_claim: "id"
# localpart_template: "{ user.login }"
# display_name_template: "{ user.name }"
# localpart_template: "{{ user.login }}"
# display_name_template: "{{ user.name }}"


# Enable Central Authentication Service (CAS) for registration and login.
@@ -1938,9 +1978,9 @@ sso:
# phishing attacks from evil.site. To avoid this, include a slash after the
# hostname: "https://my.client/".
#
# The login fallback page (used by clients that don't natively support the
# required login flows) is automatically whitelisted in addition to any URLs
# in this list.
# If public_baseurl is set, then the login fallback page (used by clients
# that don't natively support the required login flows) is whitelisted in
# addition to any URLs in this list.
#
# By default, this list is empty.
#
@@ -1961,8 +2001,7 @@ sso:
#
# When rendering, this template is given the following variables:
# * redirect_url: the URL that the user will be redirected to after
# login. Needs manual escaping (see
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
# login.
#
# * server_name: the homeserver's name.
#
@@ -2040,15 +2079,12 @@ sso:
#
# When rendering, this template is given the following variables:
#
# * redirect_url: the URL the user is about to be redirected to. Needs
# manual escaping (see
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
# * redirect_url: the URL the user is about to be redirected to.
#
# * display_url: the same as `redirect_url`, but with the query
# parameters stripped. The intention is to have a
# human-readable URL to show to users, not to use it as
# the final address to redirect to. Needs manual escaping
# (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
# the final address to redirect to.
#
# * server_name: the homeserver's name.
#
@@ -2068,9 +2104,7 @@ sso:
# process: 'sso_auth_confirm.html'.
#
# When rendering, this template is given the following variables:
# * redirect_url: the URL the user is about to be redirected to. Needs
# manual escaping (see
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
# * redirect_url: the URL the user is about to be redirected to.
#
# * description: the operation which the user is being asked to confirm
#
@@ -2226,11 +2260,11 @@ password_config:
#require_uppercase: true

ui_auth:
# The number of milliseconds to allow a user-interactive authentication
# session to be active.
# The amount of time to allow a user-interactive authentication session
# to be active.
#
# This defaults to 0, meaning the user is queried for their credentials
# before every action, but this can be overridden to alow a single
# before every action, but this can be overridden to allow a single
# validation to be re-used. This weakens the protections afforded by
# the user-interactive authentication process, by allowing for multiple
# (and potentially different) operations to use the same validation session.
@@ -2238,7 +2272,7 @@ ui_auth:
# Uncomment below to allow for credential validation to last for 15
# seconds.
#
#session_timeout: 15000
#session_timeout: "15s"


# Configuration for sending emails from Synapse.
@@ -2528,19 +2562,35 @@ spam_checker:

# User Directory configuration
#
# 'enabled' defines whether users can search the user directory. If
# false then empty responses are returned to all queries. Defaults to
# true.
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
#user_directory:
# enabled: true
# search_all_users: false
user_directory:
# Defines whether users can search the user directory. If false then
# empty responses are returned to all queries. Defaults to true.
#
# Uncomment to disable the user directory.
#
#enabled: false

# Defines whether to search all users visible to your HS when searching
# the user directory, rather than limiting to users visible in public
# rooms. Defaults to false.
#
# If you set it true, you'll have to rebuild the user_directory search
# indexes, see:
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
#
# Uncomment to return search results containing all known users, even if that
# user does not share a room with the requester.
#
#search_all_users: true

# Defines whether to prefer local users in search query results.
# If True, local users are more likely to appear above remote users
# when searching the user directory. Defaults to false.
#
# Uncomment to prefer local over remote users in user directory search
# results.
#
#prefer_local_users: true


# User Consent configuration
@@ -2595,19 +2645,20 @@ spam_checker:



# Local statistics collection. Used in populating the room directory.
# Settings for local room and user statistics collection. See
# docs/room_and_user_statistics.md.
#
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#
#stats:
# enabled: true
# bucket_size: 1d
# retention: 1y
stats:
# Uncomment the following to disable room and user statistics. Note that doing
# so may cause certain features (such as the room directory) not to work
# correctly.
#
#enabled: false

# The size of each timeslice in the room_stats_historical and
# user_stats_historical tables, as a time period. Defaults to "1d".
#
#bucket_size: 1h


# Server Notices room configuration

@@ -14,6 +14,7 @@ The Python class is instantiated with two objects:
* An instance of `synapse.module_api.ModuleApi`.

It then implements methods which return a boolean to alter behavior in Synapse.
All the methods must be defined.

There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:
@@ -24,13 +25,18 @@ well as some specific methods:
* `user_may_publish_room`
* `check_username_for_spam`
* `check_registration_for_spam`
* `check_media_file_for_spam`

The details of the each of these methods (as well as their inputs and outputs)
The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.

The `ModuleApi` class provides a way for the custom spam checker class to
call back into the homeserver internals.

Additionally, a `parse_config` method is mandatory and receives the plugin config
dictionary. After parsing, it must return an object which will be
passed to `__init__` later.

### Example

```python
@@ -41,6 +47,10 @@ class ExampleSpamChecker:
self.config = config
self.api = api

@staticmethod
def parse_config(config):
return config

async def check_event_for_spam(self, foo):
return False # allow all events

@@ -59,8 +69,17 @@ class ExampleSpamChecker:
async def check_username_for_spam(self, user_profile):
return False # allow all usernames

async def check_registration_for_spam(self, email_threepid, username, request_info):
async def check_registration_for_spam(
self,
email_threepid,
username,
request_info,
auth_provider_id,
):
return RegistrationBehaviour.ALLOW # allow all registrations

async def check_media_file_for_spam(self, file_wrapper, file_info):
return False # allow all media
```

## Configuration

@@ -4,6 +4,7 @@ AssertPathExists=/etc/matrix-synapse/workers/%i.yaml

# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
ReloadPropagatedFrom=matrix-synapse.target

# if this is started at the same time as the main, let the main process start
# first, to initialise the database schema.

@@ -3,6 +3,7 @@ Description=Synapse master

# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
ReloadPropagatedFrom=matrix-synapse.target

[Service]
Type=notify

@@ -220,10 +220,6 @@ Asks the server for the current position of all streams.

Acknowledge receipt of some federation data

#### REMOVE_PUSHER (C)

Inform the server a pusher should be removed

### REMOTE_SERVER_UP (S, C)

Inform other processes that a remote server may have come back online.

@@ -187,7 +187,7 @@ After updating the homeserver configuration, you must restart synapse:
```
* If you use systemd:
```
systemctl restart synapse.service
systemctl restart matrix-synapse.service
```
... and then reload any clients (or wait an hour for them to refresh their
settings).

@@ -276,7 +276,8 @@ using):

Ensure that all SSO logins go to a single process.
For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530).
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).

Note that a HTTP listener with `client` and `federation` resources must be
configured in the `worker_listeners` option in the worker config.
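
A minimal sketch of such a listener in the worker's YAML config (the port number is an arbitrary example):

```yaml
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```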
@@ -373,7 +374,15 @@ Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending push notifications.

Note this worker cannot be load-balanced: only one instance should be active.
To run multiple instances at once the `pusher_instances` option should list all
pusher instances by their worker name, e.g.:

```yaml
pusher_instances:
- pusher_worker1
- pusher_worker2
```


### `synapse.app.appservice`


mypy.ini
@@ -23,6 +23,7 @@ files =
|
||||
synapse/events/validator.py,
|
||||
synapse/events/spamcheck.py,
|
||||
synapse/federation,
|
||||
synapse/groups,
|
||||
synapse/handlers,
|
||||
synapse/http/client.py,
|
||||
synapse/http/federation/matrix_federation_agent.py,
|
||||
@@ -68,6 +69,7 @@ files =
|
||||
synapse/util/async_helpers.py,
|
||||
synapse/util/caches,
|
||||
synapse/util/metrics.py,
|
||||
synapse/util/macaroons.py,
|
||||
synapse/util/stringutils.py,
|
||||
tests/replication,
|
||||
tests/test_utils,
|
||||
@@ -115,9 +117,6 @@ ignore_missing_imports = True
|
||||
[mypy-saml2.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-unpaddedbase64]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-canonicaljson]
|
||||
ignore_missing_imports = True
|
||||
|
||||
|
||||
@@ -2,9 +2,14 @@
|
||||
# Find linting errors in Synapse's default config file.
|
||||
# Exits with 0 if there are no problems, or another code otherwise.
|
||||
|
||||
# cd to the root of the repository
|
||||
cd `dirname $0`/..
|
||||
|
||||
# Restore backup of sample config upon script exit
|
||||
trap "mv docs/sample_config.yaml.bak docs/sample_config.yaml" EXIT
|
||||
|
||||
# Fix non-lowercase true/false values
|
||||
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
|
||||
rm docs/sample_config.yaml.bak
|
||||
|
||||
# Check if anything changed
|
||||
git diff --exit-code docs/sample_config.yaml
|
||||
diff docs/sample_config.yaml docs/sample_config.yaml.bak
|
||||
|
||||
@@ -162,12 +162,23 @@ else
|
||||
fi
|
||||
|
||||
# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
|
||||
# Also delete any shadow tables from fts4
|
||||
# This needs to be done after synapse_port_db is run
|
||||
echo "Dropping unwanted db tables..."
|
||||
SQL="
|
||||
DROP TABLE schema_version;
|
||||
DROP TABLE applied_schema_deltas;
|
||||
DROP TABLE applied_module_schemas;
|
||||
DROP TABLE event_search_content;
|
||||
DROP TABLE event_search_segments;
|
||||
DROP TABLE event_search_segdir;
|
||||
DROP TABLE event_search_docsize;
|
||||
DROP TABLE event_search_stat;
|
||||
DROP TABLE user_directory_search_content;
|
||||
DROP TABLE user_directory_search_segments;
|
||||
DROP TABLE user_directory_search_segdir;
|
||||
DROP TABLE user_directory_search_docsize;
|
||||
DROP TABLE user_directory_search_stat;
|
||||
"
|
||||
sqlite3 "$SQLITE_DB" <<< "$SQL"
|
||||
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"
|
||||
|
||||
@@ -87,7 +87,9 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
|
||||
arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg.
|
||||
|
||||
signature = signature.copy_modified(
|
||||
arg_types=arg_types, arg_names=arg_names, arg_kinds=arg_kinds,
|
||||
arg_types=arg_types,
|
||||
arg_names=arg_names,
|
||||
arg_kinds=arg_kinds,
|
||||
)
|
||||
|
||||
return signature
|
||||
|
||||
@@ -22,7 +22,7 @@ import logging
import sys
import time
import traceback
from typing import Dict, Optional, Set
from typing import Dict, Iterable, Optional, Set

import yaml

@@ -47,6 +47,7 @@ from synapse.storage.databases.main.events_bg_updates import (
from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
    find_max_generated_user_id_localpart,
@@ -177,6 +178,7 @@ class Store(
    UserDirectoryBackgroundUpdateStore,
    EndToEndKeyBackgroundStore,
    StatsStore,
    PusherWorkerStore,
):
    def execute(self, f, *args, **kwargs):
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -629,7 +631,13 @@ class Porter(object):
        await self._setup_state_group_id_seq()
        await self._setup_user_id_seq()
        await self._setup_events_stream_seqs()
        await self._setup_device_inbox_seq()
        await self._setup_sequence(
            "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
        )
        await self._setup_sequence(
            "account_data_sequence", ("room_account_data", "room_tags_revisions", "account_data"))
        await self._setup_sequence("receipts_sequence", ("receipts_linearized", ))
        await self._setup_auth_chain_sequence()

        # Step 3. Get tables.
        self.progress.set_state("Fetching tables")
@@ -854,7 +862,7 @@ class Porter(object):

        return done, remaining + done

    async def _setup_state_group_id_seq(self):
    async def _setup_state_group_id_seq(self) -> None:
        curr_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True
        )
@@ -868,7 +876,7 @@ class Porter(object):

        await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r)

    async def _setup_user_id_seq(self):
    async def _setup_user_id_seq(self) -> None:
        curr_id = await self.sqlite_store.db_pool.runInteraction(
            "setup_user_id_seq", find_max_generated_user_id_localpart
        )
@@ -877,9 +885,9 @@ class Porter(object):
            next_id = curr_id + 1
            txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,))

        return self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)
        await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)

    async def _setup_events_stream_seqs(self):
    async def _setup_events_stream_seqs(self) -> None:
        """Set the event stream sequences to the correct values.
        """

@@ -908,35 +916,46 @@ class Porter(object):
                (curr_backward_id + 1,),
            )

        return await self.postgres_store.db_pool.runInteraction(
        await self.postgres_store.db_pool.runInteraction(
            "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
        )

    async def _setup_device_inbox_seq(self):
        """Set the device inbox sequence to the correct value.
    async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None:
        """Set a sequence to the correct value.
        """
        curr_local_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="device_inbox",
            keyvalues={},
            retcol="COALESCE(MAX(stream_id), 1)",
            allow_none=True,
        )
        current_stream_ids = []
        for stream_id_table in stream_id_tables:
            max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
                table=stream_id_table,
                keyvalues={},
                retcol="COALESCE(MAX(stream_id), 1)",
                allow_none=True,
            )
            current_stream_ids.append(max_stream_id)

        curr_federation_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="device_federation_outbox",
            keyvalues={},
            retcol="COALESCE(MAX(stream_id), 1)",
            allow_none=True,
        )
        next_id = max(current_stream_ids) + 1

        next_id = max(curr_local_id, curr_federation_id) + 1
        def r(txn):
            sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, )
            txn.execute(sql + " %s", (next_id, ))

        await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r)

    async def _setup_auth_chain_sequence(self) -> None:
        curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True
        )

        def r(txn):
            txn.execute(
                "ALTER SEQUENCE device_inbox_sequence RESTART WITH %s", (next_id,)
                "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
                (curr_chain_id,),
            )

        return self.postgres_store.db_pool.runInteraction("_setup_device_inbox_seq", r)
        await self.postgres_store.db_pool.runInteraction(
            "_setup_event_auth_chain_id", r,
        )


##############################################

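The generalised `_setup_sequence` above replaces the one-off device-inbox helper: take the largest `stream_id` across every table fed by a sequence, then restart the Postgres sequence one past it. A minimal standalone sketch of that arithmetic (throwaway SQLite tables stand in for the real ones; this is not the porter's API):

```python
import sqlite3
from typing import Iterable


def next_sequence_value(conn: sqlite3.Connection, tables: Iterable[str]) -> int:
    """Return the value a sequence should restart with: one past the largest
    stream_id seen in any of the given tables (minimum 1)."""
    current = []
    for table in tables:
        row = conn.execute(
            "SELECT COALESCE(MAX(stream_id), 1) FROM %s" % (table,)
        ).fetchone()
        current.append(row[0])
    return max(current) + 1


# Example with stand-in tables for device_inbox / device_federation_outbox.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE device_inbox (stream_id INTEGER)")
conn.execute("CREATE TABLE device_federation_outbox (stream_id INTEGER)")
conn.execute("INSERT INTO device_inbox VALUES (7)")
print(next_sequence_value(conn, ["device_inbox", "device_federation_outbox"]))  # 8
```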
@@ -3,6 +3,7 @@ test_suite = tests

[check-manifest]
ignore =
    .git-blame-ignore-revs
    contrib
    contrib/*
    docs/*

setup.py

@@ -97,12 +97,12 @@ CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
# We pin black so that our tests don't start failing on new releases.
CONDITIONAL_REQUIREMENTS["lint"] = [
    "isort==5.7.0",
    "black==19.10b0",
    "black==20.8b1",
    "flake8-comprehensions",
    "flake8",
]

CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"]
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.11"]

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.

@@ -89,12 +89,16 @@ class SortedDict(Dict[_KT, _VT]):
    def __reduce__(
        self,
    ) -> Tuple[
        Type[SortedDict[_KT, _VT]], Tuple[Callable[[_KT], Any], List[Tuple[_KT, _VT]]],
        Type[SortedDict[_KT, _VT]],
        Tuple[Callable[[_KT], Any], List[Tuple[_KT, _VT]]],
    ]: ...
    def __repr__(self) -> str: ...
    def _check(self) -> None: ...
    def islice(
        self, start: Optional[int] = ..., stop: Optional[int] = ..., reverse=bool,
        self,
        start: Optional[int] = ...,
        stop: Optional[int] = ...,
        reverse=bool,
    ) -> Iterator[_KT]: ...
    def bisect_left(self, value: _KT) -> int: ...
    def bisect_right(self, value: _KT) -> int: ...

@@ -31,7 +31,9 @@ class SortedList(MutableSequence[_T]):

    DEFAULT_LOAD_FACTOR: int = ...
    def __init__(
        self, iterable: Optional[Iterable[_T]] = ..., key: Optional[_Key[_T]] = ...,
        self,
        iterable: Optional[Iterable[_T]] = ...,
        key: Optional[_Key[_T]] = ...,
    ): ...
    # NB: currently mypy does not honour return type, see mypy #3307
    @overload
@@ -76,10 +78,18 @@ class SortedList(MutableSequence[_T]):
    def __len__(self) -> int: ...
    def reverse(self) -> None: ...
    def islice(
        self, start: Optional[int] = ..., stop: Optional[int] = ..., reverse=bool,
        self,
        start: Optional[int] = ...,
        stop: Optional[int] = ...,
        reverse=bool,
    ) -> Iterator[_T]: ...
    def _islice(
        self, min_pos: int, min_idx: int, max_pos: int, max_idx: int, reverse: bool,
        self,
        min_pos: int,
        min_idx: int,
        max_pos: int,
        max_idx: int,
        reverse: bool,
    ) -> Iterator[_T]: ...
    def irange(
        self,

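For context on these stub changes: `islice` on the `sortedcontainers` types iterates a positional slice of the sorted data, which is why the stub spells out `start`/`stop`/`reverse`. A quick usage example:

```python
from sortedcontainers import SortedList

sl = SortedList([5, 1, 4, 2, 3])             # kept sorted: [1, 2, 3, 4, 5]
print(list(sl.islice(1, 4)))                 # [2, 3, 4]
print(list(sl.islice(1, 4, reverse=True)))   # [4, 3, 2]
```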
@@ -17,7 +17,9 @@
"""
from typing import Any, List, Optional, Type, Union

class RedisProtocol:
from twisted.internet import protocol

class RedisProtocol(protocol.Protocol):
    def publish(self, channel: str, message: bytes): ...
    async def ping(self) -> None: ...
    async def set(
@@ -52,7 +54,7 @@ def lazyConnection(

class ConnectionHandler: ...

class RedisFactory:
class RedisFactory(protocol.ReconnectingClientFactory):
    continueTrying: bool
    handler: RedisProtocol
    pool: List[RedisProtocol]

@@ -48,7 +48,7 @@ try:
except ImportError:
    pass

__version__ = "1.27.0rc1"
__version__ = "1.30.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -39,6 +39,7 @@ from synapse.logging import opentracing as opentracing
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import StateMap, UserID
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)
@@ -163,12 +164,12 @@ class Auth:

    async def get_user_by_req(
        self,
        request: Request,
        request: SynapseRequest,
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
    ) -> synapse.types.Requester:
        """ Get a registered user's ID.
        """Get a registered user's ID.

        Args:
            request: An HTTP request with an access_token query parameter.
@@ -294,9 +295,12 @@ class Auth:
        return user_id, app_service

    async def get_user_by_access_token(
        self, token: str, rights: str = "access", allow_expired: bool = False,
        self,
        token: str,
        rights: str = "access",
        allow_expired: bool = False,
    ) -> TokenLookupResult:
        """ Validate access token and get user_id from it
        """Validate access token and get user_id from it

        Args:
            token: The access token to get the user by
@@ -405,7 +409,7 @@ class Auth:
            raise _InvalidMacaroonException()

        try:
            user_id = self.get_user_id_from_macaroon(macaroon)
            user_id = get_value_from_macaroon(macaroon, "user_id")

            guest = False
            for caveat in macaroon.caveats:
@@ -413,7 +417,12 @@ class Auth:
                    guest = True

            self.validate_macaroon(macaroon, rights, user_id=user_id)
        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
        except (
            pymacaroons.exceptions.MacaroonException,
            KeyError,
            TypeError,
            ValueError,
        ):
            raise InvalidClientTokenError("Invalid macaroon passed.")

        if rights == "access":
@@ -421,27 +430,6 @@ class Auth:

        return user_id, guest

    def get_user_id_from_macaroon(self, macaroon):
        """Retrieve the user_id given by the caveats on the macaroon.

        Does *not* validate the macaroon.

        Args:
            macaroon (pymacaroons.Macaroon): The macaroon to validate

        Returns:
            (str) user id

        Raises:
            InvalidClientCredentialsError if there is no user_id caveat in the
                macaroon
        """
        user_prefix = "user_id = "
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith(user_prefix):
                return caveat.caveat_id[len(user_prefix) :]
        raise InvalidClientTokenError("No user caveat in macaroon")

    def validate_macaroon(self, macaroon, type_string, user_id):
        """
        validate that a Macaroon is understood by and was signed by this server.
@@ -462,21 +450,13 @@ class Auth:
        v.satisfy_exact("type = " + type_string)
        v.satisfy_exact("user_id = %s" % user_id)
        v.satisfy_exact("guest = true")
        v.satisfy_general(self._verify_expiry)
        satisfy_expiry(v, self.clock.time_msec)

        # access_tokens include a nonce for uniqueness: any value is acceptable
        v.satisfy_general(lambda c: c.startswith("nonce = "))

        v.verify(macaroon, self._macaroon_secret_key)

    def _verify_expiry(self, caveat):
        prefix = "time < "
        if not caveat.startswith(prefix):
            return False
        expiry = int(caveat[len(prefix) :])
        now = self.hs.get_clock().time_msec()
        return now < expiry

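The deleted `get_user_id_from_macaroon` and `_verify_expiry` are what `get_value_from_macaroon` and `satisfy_expiry` now centralise in `synapse.util.macaroons`. A rough sketch of the same caveat checks written directly against `pymacaroons` (the secret, caveats, and expiry window here are illustrative):

```python
import time

import pymacaroons

key = b"macaroon-secret-key"
m = pymacaroons.Macaroon(location="example.com", identifier="key", key=key)
m.add_first_party_caveat("type = access")
m.add_first_party_caveat("user_id = @alice:example.com")
m.add_first_party_caveat("time < %d" % (int(time.time() * 1000) + 60_000))


def expiry_caveat_ok(caveat: str) -> bool:
    # Equivalent of the old _verify_expiry: accept "time < <ms>" in the future.
    prefix = "time < "
    if not caveat.startswith(prefix):
        return False
    return int(time.time() * 1000) < int(caveat[len(prefix):])


v = pymacaroons.Verifier()
v.satisfy_exact("type = access")
v.satisfy_exact("user_id = @alice:example.com")
v.satisfy_general(expiry_caveat_ok)
print(v.verify(m, key))  # True; raises on a bad or expired macaroon
```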
    def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService:
        token = self.get_access_token_from_request(request)
        service = self.store.get_app_service_by_token(token)
@@ -489,7 +469,7 @@ class Auth:
        return service

    async def is_server_admin(self, user: UserID) -> bool:
        """ Check if the given user is a local server admin.
        """Check if the given user is a local server admin.

        Args:
            user: user to check
@@ -500,7 +480,10 @@ class Auth:
        return await self.store.is_server_admin(user)

    def compute_auth_events(
        self, event, current_state_ids: StateMap[str], for_verification: bool = False,
        self,
        event,
        current_state_ids: StateMap[str],
        for_verification: bool = False,
    ) -> List[str]:
        """Given an event and current state return the list of event IDs used
        to auth an event.

@@ -27,6 +27,11 @@ MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255

# The maximum length for a group id is 255 characters
MAX_GROUPID_LENGTH = 255
MAX_GROUP_CATEGORYID_LENGTH = 255
MAX_GROUP_ROLEID_LENGTH = 255


class Membership:

@@ -93,11 +98,14 @@ class EventTypes:

    Retention = "m.room.retention"

    Presence = "m.presence"

    Dummy = "org.matrix.dummy_event"


class EduTypes:
    Presence = "m.presence"
    RoomKeyRequest = "m.room_key_request"


class RejectedReason:
    AUTH_ERROR = "auth_error"

@@ -128,8 +136,7 @@ class UserTypes:


class RelationTypes:
    """The types of relations known to this server.
    """
    """The types of relations known to this server."""

    ANNOTATION = "m.annotation"
    REPLACE = "m.replace"

@@ -390,8 +390,7 @@ class InvalidCaptchaError(SynapseError):


class LimitExceededError(SynapseError):
    """A client has sent too many requests and is being throttled.
    """
    """A client has sent too many requests and is being throttled."""

    def __init__(
        self,
@@ -408,8 +407,7 @@ class LimitExceededError(SynapseError):


class RoomKeysVersionError(SynapseError):
    """A client has tried to upload to a non-current version of the room_keys store
    """
    """A client has tried to upload to a non-current version of the room_keys store"""

    def __init__(self, current_version: str):
        """
@@ -426,7 +424,9 @@ class UnsupportedRoomVersionError(SynapseError):

    def __init__(self, msg: str = "Homeserver does not support this room version"):
        super().__init__(
            code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION,
            code=400,
            msg=msg,
            errcode=Codes.UNSUPPORTED_ROOM_VERSION,
        )


@@ -461,8 +461,7 @@ class IncompatibleRoomVersionError(SynapseError):


class PasswordRefusedError(SynapseError):
    """A password has been refused, either during password reset/change or registration.
    """
    """A password has been refused, either during password reset/change or registration."""

    def __init__(
        self,
@@ -470,7 +469,9 @@ class PasswordRefusedError(SynapseError):
        errcode: str = Codes.WEAK_PASSWORD,
    ):
        super().__init__(
            code=400, msg=msg, errcode=errcode,
            code=400,
            msg=msg,
            errcode=errcode,
        )


@@ -493,7 +494,7 @@ class RequestSendFailed(RuntimeError):


def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs):
    """ Utility method for constructing an error response for client-server
    """Utility method for constructing an error response for client-server
    interactions.

    Args:
@@ -510,7 +511,7 @@ def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs):


class FederationError(RuntimeError):
    """ This class is used to inform remote homeservers about erroneous
    """This class is used to inform remote homeservers about erroneous
    PDUs they sent us.

    FATAL: The remote server could not interpret the source event.

@@ -56,8 +56,7 @@ class UserPresenceState(

    @classmethod
    def default(cls, user_id):
        """Returns a default presence state.
        """
        """Returns a default presence state."""
        return cls(
            user_id=user_id,
            state=PresenceState.OFFLINE,

@@ -14,7 +14,7 @@
# limitations under the License.

from collections import OrderedDict
from typing import Any, Optional, Tuple
from typing import Hashable, Optional, Tuple

from synapse.api.errors import LimitExceededError
from synapse.types import Requester
@@ -42,7 +42,9 @@ class Ratelimiter:
        # * How many times an action has occurred since a point in time
        # * The point in time
        # * The rate_hz of this particular entry. This can vary per request
        self.actions = OrderedDict()  # type: OrderedDict[Any, Tuple[float, int, float]]
        self.actions = (
            OrderedDict()
        )  # type: OrderedDict[Hashable, Tuple[float, int, float]]

    def can_requester_do_action(
        self,
@@ -82,7 +84,7 @@ class Ratelimiter:

    def can_do_action(
        self,
        key: Any,
        key: Hashable,
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
@@ -175,7 +177,7 @@ class Ratelimiter:

    def ratelimit(
        self,
        key: Any,
        key: Hashable,
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,

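Narrowing the key type from `Any` to `Hashable` encodes what the `OrderedDict` already required: ratelimiter keys must be hashable, whether plain user-ID strings or `(user_id, device_id)` tuples. A toy illustration of a per-key limiter (not Synapse's actual algorithm):

```python
import time
from collections import OrderedDict
from typing import Hashable, Tuple


class TinyRatelimiter:
    """Toy per-key rate limiter: allow `burst` actions per `period` seconds."""

    def __init__(self, burst: int, period: float) -> None:
        self.burst = burst
        self.period = period
        self.actions: "OrderedDict[Hashable, Tuple[float, int]]" = OrderedDict()

    def can_do_action(self, key: Hashable) -> bool:
        now = time.monotonic()
        start, count = self.actions.get(key, (now, 0))
        if now - start > self.period:
            start, count = now, 0  # window expired: reset the counter
        if count >= self.burst:
            return False
        self.actions[key] = (start, count + 1)
        return True


limiter = TinyRatelimiter(burst=2, period=1.0)
key = ("@alice:example.com", "DEVICE1")  # tuples are hashable, so valid keys
print([limiter.can_do_action(key) for _ in range(3)])  # [True, True, False]
```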
@@ -42,6 +42,8 @@ class ConsentURIBuilder:
        """
        if hs_config.form_secret is None:
            raise ConfigError("form_secret not set in config")
        if hs_config.public_baseurl is None:
            raise ConfigError("public_baseurl not set in config")

        self._hmac_secret = hs_config.form_secret.encode("utf-8")
        self._public_baseurl = hs_config.public_baseurl

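The two checks exist because the consent URI embeds an HMAC of the user ID, keyed with `form_secret`, on a link rooted at `public_baseurl`. A hedged sketch of that construction — the exact URI layout here is an assumption for illustration, not the builder's guaranteed output:

```python
import hmac
from hashlib import sha256
from urllib.parse import urlencode


def build_consent_uri(public_baseurl: str, form_secret: str, user_id: str) -> str:
    # HMAC the user ID so the consent page can verify the link wasn't forged.
    mac = hmac.new(
        key=form_secret.encode("utf-8"),
        msg=user_id.encode("ascii"),
        digestmod=sha256,
    ).hexdigest()
    return "%s_matrix/consent?%s" % (
        public_baseurl,
        urlencode({"u": user_id, "h": mac}),
    )


print(build_consent_uri("https://example.com/", "secret", "@alice:example.com"))
```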
@@ -17,8 +17,6 @@ import sys

from synapse import python_dependencies  # noqa: E402

sys.dont_write_bytecode = True

logger = logging.getLogger(__name__)

try:

@@ -58,7 +58,7 @@ def register_sighup(func, *args, **kwargs):


def start_worker_reactor(appname, config, run_command=reactor.run):
    """ Run the reactor in the main process
    """Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor. Pulls configuration from the 'worker' settings in 'config'.
@@ -93,7 +93,7 @@ def start_reactor(
    logger,
    run_command=reactor.run,
):
    """ Run the reactor in the main process
    """Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor
@@ -313,9 +313,7 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon
    refresh_certificate(hs)

    # Start the tracer
    synapse.logging.opentracing.init_tracer(  # type: ignore[attr-defined] # noqa
        hs
    )
    synapse.logging.opentracing.init_tracer(hs)  # type: ignore[attr-defined] # noqa

    # It is now safe to start your Synapse.
    hs.start_listening(listeners)
@@ -370,8 +368,7 @@ def setup_sentry(hs):


def setup_sdnotify(hs):
    """Adds process state hooks to tell systemd what we are up to.
    """
    """Adds process state hooks to tell systemd what we are up to."""

    # Tell systemd our state, if we're using it. This will silently fail if
    # we're not using systemd.
@@ -405,8 +402,7 @@ def install_dns_limiter(reactor, max_dns_requests_in_flight=100):


class _LimitedHostnameResolver:
    """Wraps a IHostnameResolver, limiting the number of in-flight DNS lookups.
    """
    """Wraps a IHostnameResolver, limiting the number of in-flight DNS lookups."""

    def __init__(self, resolver, max_dns_requests_in_flight):
        self._resolver = resolver

@@ -210,7 +210,9 @@ def start(config_options):
    config.update_user_directory = False
    config.run_background_tasks = False
    config.start_pushers = False
    config.pusher_shard_config.instances = []
    config.send_federation = False
    config.federation_shard_config.instances = []

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

@@ -23,6 +23,7 @@ from typing_extensions import ContextManager

from twisted.internet import address
from twisted.web.resource import IResource
from twisted.web.server import Request

import synapse
import synapse.events
@@ -190,7 +191,7 @@ class KeyUploadServlet(RestServlet):
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    async def on_POST(self, request, device_id):
    async def on_POST(self, request: Request, device_id: Optional[str]):
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)
@@ -223,10 +224,12 @@ class KeyUploadServlet(RestServlet):
                header: request.requestHeaders.getRawHeaders(header, [])
                for header in (b"Authorization", b"User-Agent")
            }
            # Add the previous hop the the X-Forwarded-For header.
            # Add the previous hop to the X-Forwarded-For header.
            x_forwarded_for = request.requestHeaders.getRawHeaders(
                b"X-Forwarded-For", []
            )
            # we use request.client here, since we want the previous hop, not the
            # original client (as returned by request.getClientAddress()).
            if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
                previous_host = request.client.host.encode("ascii")
                # If the header exists, add to the comma-separated list of the first
@@ -239,6 +242,14 @@ class KeyUploadServlet(RestServlet):
                    x_forwarded_for = [previous_host]
                headers[b"X-Forwarded-For"] = x_forwarded_for

            # Replicate the original X-Forwarded-Proto header. Note that
            # XForwardedForRequest overrides isSecure() to give us the original protocol
            # used by the client, as opposed to the protocol used by our upstream proxy
            # - which is what we want here.
            headers[b"X-Forwarded-Proto"] = [
                b"https" if request.isSecure() else b"http"
            ]

            try:
                result = await self.http_client.post_json_get_json(
                    self.main_uri + request.uri.decode("ascii"), body, headers=headers
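This hunk makes the worker behave like a well-mannered reverse proxy when re-posting the upload to the main process: append the previous hop to `X-Forwarded-For` and carry the original scheme in `X-Forwarded-Proto`. The header arithmetic in isolation (plain helper functions, not the servlet's API):

```python
from typing import List, Optional


def forwarded_for(existing: List[bytes], previous_hop: Optional[bytes]) -> List[bytes]:
    """Append the previous hop to the comma-separated X-Forwarded-For list."""
    if previous_hop is None:
        return existing
    if existing:
        return [existing[0] + b", " + previous_hop] + existing[1:]
    return [previous_hop]


def forwarded_proto(is_secure: bool) -> List[bytes]:
    """Report the protocol the original client used."""
    return [b"https" if is_secure else b"http"]


print(forwarded_for([b"203.0.113.9"], b"10.0.0.2"))  # [b'203.0.113.9, 10.0.0.2']
print(forwarded_proto(True))                         # [b'https']
```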
@@ -421,8 +432,7 @@ class GenericWorkerPresence(BasePresenceHandler):
        ]

    async def set_state(self, target_user, state, ignore_status_msg=False):
        """Set the presence state of the user.
        """
        """Set the presence state of the user."""
        presence = state["presence"]

        valid_presence = (
@@ -646,9 +656,6 @@ class GenericWorkerServer(HomeServer):

        self.get_tcp_replication().start_replication(self)

    async def remove_pusher(self, app_id, push_key, user_id):
        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

    @cache_in_self
    def get_replication_data_handler(self):
        return GenericWorkerReplicationHandler(self)
@@ -923,22 +930,6 @@ def start(config_options):
        # For other worker types we force this to off.
        config.appservice.notify_appservices = False

    if config.worker_app == "synapse.app.pusher":
        if config.server.start_pushers:
            sys.stderr.write(
                "\nThe pushers must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``start_pushers: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the pushers to start since they will be disabled in the main config
        config.server.start_pushers = True
    else:
        # For other worker types we force this to off.
        config.server.start_pushers = False

    if config.worker_app == "synapse.app.user_dir":
        if config.server.update_user_directory:
            sys.stderr.write(
@@ -955,22 +946,6 @@ def start(config_options):
        # For other worker types we force this to off.
        config.server.update_user_directory = False

    if config.worker_app == "synapse.app.federation_sender":
        if config.worker.send_federation:
            sys.stderr.write(
                "\nThe send_federation must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``send_federation: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the pushers to start since they will be disabled in the main config
        config.worker.send_federation = True
    else:
        # For other worker types we force this to off.
        config.worker.send_federation = False

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    hs = GenericWorkerServer(

@@ -166,7 +166,10 @@ class ApplicationService:

    @cached(num_args=1, cache_context=True)
    async def matches_user_in_member_list(
        self, room_id: str, store: "DataStore", cache_context: _CacheContext,
        self,
        room_id: str,
        store: "DataStore",
        cache_context: _CacheContext,
    ) -> bool:
        """Check if this service is interested a room based upon it's membership

@@ -76,9 +76,6 @@ def _is_valid_3pe_result(r, field):
    fields = r["fields"]
    if not isinstance(fields, dict):
        return False
    for k in fields.keys():
        if not isinstance(fields[k], str):
            return False

    return True

@@ -93,7 +90,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(
            hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
            hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )  # type: ResponseCache[Tuple[str, str]]

    async def query_user(self, service, user_id):
@@ -230,7 +227,9 @@ class ApplicationServiceApi(SimpleHttpClient):

        try:
            await self.put_json(
                uri=uri, json_body=body, args={"access_token": service.hs_token},
                uri=uri,
                json_body=body,
                args={"access_token": service.hs_token},
            )
            sent_transactions_counter.labels(service.id).inc()
            sent_events_counter.labels(service.id).inc(len(events))

@@ -68,7 +68,7 @@ MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100


class ApplicationServiceScheduler:
    """ Public facing API for this module. Does the required DI to tie the
    """Public facing API for this module. Does the required DI to tie the
    components together. This also serves as the "event_pool", which in this
    case is a simple array.
    """

@@ -21,7 +21,7 @@ import os
from collections import OrderedDict
from hashlib import sha256
from textwrap import dedent
from typing import Any, Iterable, List, MutableMapping, Optional
from typing import Any, Iterable, List, MutableMapping, Optional, Union

import attr
import jinja2
@@ -147,7 +147,20 @@ class Config:
        return int(value) * size

    @staticmethod
    def parse_duration(value):
    def parse_duration(value: Union[str, int]) -> int:
        """Convert a duration as a string or integer to a number of milliseconds.

        If an integer is provided it is treated as milliseconds and is unchanged.

        String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'.
        No suffix is treated as milliseconds.

        Args:
            value: The duration to parse.

        Returns:
            The number of milliseconds in the duration.
        """
        if isinstance(value, int):
            return value
        second = 1000
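With the typed `parse_duration`, any duration-valued setting accepts raw milliseconds or a suffixed string. A standalone re-implementation matching the docstring above (a sketch, not the Synapse source):

```python
from typing import Union


def parse_duration(value: Union[str, int]) -> int:
    """Convert a duration ('15s', '1w', 500) to milliseconds."""
    if isinstance(value, int):
        return value  # already milliseconds
    second = 1000
    minute = 60 * second
    hour = 60 * minute
    day = 24 * hour
    week = 7 * day
    year = 365 * day
    sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
    size = 1
    if value[-1] in sizes:
        size = sizes[value[-1]]
        value = value[:-1]
    return int(value) * size


print(parse_duration("15s"))   # 15000
print(parse_duration("1w"))    # 604800000
print(parse_duration(15000))   # 15000
```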
@@ -199,9 +212,8 @@ class Config:

    @classmethod
    def read_file(cls, file_path, config_name):
        cls.check_file(file_path, config_name)
        with open(file_path) as file_stream:
            return file_stream.read()
        """Deprecated: call read_file directly"""
        return read_file(file_path, (config_name,))

    def read_template(self, filename: str) -> jinja2.Template:
        """Load a template file from disk.
@@ -224,7 +236,9 @@ class Config:
        return self.read_templates([filename])[0]

    def read_templates(
        self, filenames: List[str], custom_template_directory: Optional[str] = None,
        self,
        filenames: List[str],
        custom_template_directory: Optional[str] = None,
    ) -> List[jinja2.Template]:
        """Load a list of template files from disk using the given variables.

@@ -264,7 +278,10 @@ class Config:

        # TODO: switch to synapse.util.templates.build_jinja_env
        loader = jinja2.FileSystemLoader(search_directories)
        env = jinja2.Environment(loader=loader, autoescape=jinja2.select_autoescape(),)
        env = jinja2.Environment(
            loader=loader,
            autoescape=jinja2.select_autoescape(),
        )

        # Update the environment with our custom filters
        env.filters.update(
@@ -825,24 +842,24 @@ class ShardedWorkerHandlingConfig:
    instances = attr.ib(type=List[str])

    def should_handle(self, instance_name: str, key: str) -> bool:
        """Whether this instance is responsible for handling the given key.
        """
        # If multiple instances are not defined we always return true
        if not self.instances or len(self.instances) == 1:
            return True
        """Whether this instance is responsible for handling the given key."""
        # If no instances are defined we assume some other worker is handling
        # this.
        if not self.instances:
            return False

        return self.get_instance(key) == instance_name
        return self._get_instance(key) == instance_name

    def get_instance(self, key: str) -> str:
    def _get_instance(self, key: str) -> str:
        """Get the instance responsible for handling the given key.

        Note: For things like federation sending the config for which instance
        is sending is known only to the sender instance if there is only one.
        Therefore `should_handle` should be used where possible.
        Note: For federation sending and pushers the config for which instance
        is sending is known only to the sender instance, so we don't expose this
        method by default.
        """

        if not self.instances:
            return "master"
            raise Exception("Unknown worker")

        if len(self.instances) == 1:
            return self.instances[0]
@@ -859,4 +876,52 @@ class ShardedWorkerHandlingConfig:
        return self.instances[remainder]


__all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]
@attr.s
class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
    """A version of `ShardedWorkerHandlingConfig` that is used for config
    options where all instances know which instances are responsible for the
    sharded work.
    """

    def __attrs_post_init__(self):
        # We require that `self.instances` is non-empty.
        if not self.instances:
            raise Exception("Got empty list of instances for shard config")

    def get_instance(self, key: str) -> str:
        """Get the instance responsible for handling the given key."""
        return self._get_instance(key)


def read_file(file_path: Any, config_path: Iterable[str]) -> str:
    """Check the given file exists, and read it into a string

    If it does not, emit an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
            error can be emitted if it does not exist.
    Returns:
        content of the file.
    Raises:
        ConfigError if there is a problem reading the file.
    """
    if not isinstance(file_path, str):
        raise ConfigError("%r is not a string", config_path)

    try:
        os.stat(file_path)
        with open(file_path) as file_stream:
            return file_stream.read()
    except OSError as e:
        raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e


__all__ = [
    "Config",
    "RootConfig",
    "ShardedWorkerHandlingConfig",
    "RoutableShardedWorkerHandlingConfig",
    "read_file",
]

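Both shard-config classes route through the same `_get_instance` logic, which the surrounding hunks show ending in `return self.instances[remainder]`: hash the key and take it modulo the instance count, so every process configured with the same `instances` list agrees on the assignment. Approximately (a sketch mirroring the behaviour described above):

```python
from hashlib import sha256
from typing import List


def get_instance(instances: List[str], key: str) -> str:
    """Deterministically map a key (e.g. a pusher's user ID) to an instance."""
    if not instances:
        raise Exception("Got empty list of instances for shard config")
    if len(instances) == 1:
        return instances[0]
    # Hash the key so assignment is stable across processes and restarts.
    dest_hash = sha256(key.encode("utf8")).digest()
    dest_int = int.from_bytes(dest_hash, byteorder="little")
    return instances[dest_int % len(instances)]


instances = ["pusher_worker1", "pusher_worker2"]
print(get_instance(instances, "@alice:example.com"))  # same answer everywhere
```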
@@ -149,4 +149,8 @@ class ShardedWorkerHandlingConfig:
    instances: List[str]
    def __init__(self, instances: List[str]) -> None: ...
    def should_handle(self, instance_name: str, key: str) -> bool: ...

class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
    def get_instance(self, key: str) -> str: ...

def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...

@@ -18,8 +18,7 @@ from ._base import Config


class AuthConfig(Config):
    """Password and login configuration
    """
    """Password and login configuration"""

    section = "auth"

@@ -38,7 +37,9 @@ class AuthConfig(Config):

        # User-interactive authentication
        ui_auth = config.get("ui_auth") or {}
        self.ui_auth_session_timeout = ui_auth.get("session_timeout", 0)
        self.ui_auth_session_timeout = self.parse_duration(
            ui_auth.get("session_timeout", 0)
        )

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
@@ -94,11 +95,11 @@ class AuthConfig(Config):
        #require_uppercase: true

        ui_auth:
            # The number of milliseconds to allow a user-interactive authentication
            # session to be active.
            # The amount of time to allow a user-interactive authentication session
            # to be active.
            #
            # This defaults to 0, meaning the user is queried for their credentials
            # before every action, but this can be overridden to alow a single
            # before every action, but this can be overridden to allow a single
            # validation to be re-used. This weakens the protections afforded by
            # the user-interactive authentication process, by allowing for multiple
            # (and potentially different) operations to use the same validation session.
@@ -106,5 +107,5 @@ class AuthConfig(Config):
            # Uncomment below to allow for credential validation to last for 15
            # seconds.
            #
            #session_timeout: 15000
            #session_timeout: "15s"
        """

@@ -13,7 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from typing import Any, List

from synapse.config.sso import SsoAttributeRequirement

from ._base import Config, ConfigError
from ._util import validate_config


class CasConfig(Config):
@@ -30,20 +35,26 @@ class CasConfig(Config):

        if self.cas_enabled:
            self.cas_server_url = cas_config["server_url"]
            public_base_url = cas_config.get("service_url") or self.public_baseurl
            if public_base_url[-1] != "/":
                public_base_url += "/"

            # The public baseurl is required because it is used by the redirect
            # template.
            public_baseurl = self.public_baseurl
            if not public_baseurl:
                raise ConfigError("cas_config requires a public_baseurl to be set")

            # TODO Update this to a _synapse URL.
            self.cas_service_url = (
                public_base_url + "_matrix/client/r0/login/cas/ticket"
            )
            self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"
            self.cas_displayname_attribute = cas_config.get("displayname_attribute")
            self.cas_required_attributes = cas_config.get("required_attributes") or {}
            required_attributes = cas_config.get("required_attributes") or {}
            self.cas_required_attributes = _parsed_required_attributes_def(
                required_attributes
            )

        else:
            self.cas_server_url = None
            self.cas_service_url = None
            self.cas_displayname_attribute = None
            self.cas_required_attributes = {}
            self.cas_required_attributes = []

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
@@ -75,3 +86,22 @@ class CasConfig(Config):
        #  userGroup: "staff"
        #  department: None
        """


# CAS uses a legacy required attributes mapping, not the one provided by
# SsoAttributeRequirement.
REQUIRED_ATTRIBUTES_SCHEMA = {
    "type": "object",
    "additionalProperties": {"anyOf": [{"type": "string"}, {"type": "null"}]},
}


def _parsed_required_attributes_def(
    required_attributes: Any,
) -> List[SsoAttributeRequirement]:
    validate_config(
        REQUIRED_ATTRIBUTES_SCHEMA,
        required_attributes,
        config_path=("cas_config", "required_attributes"),
    )
    return [SsoAttributeRequirement(k, v) for k, v in required_attributes.items()]

@@ -207,8 +207,7 @@ class DatabaseConfig(Config):
        )

    def get_single_database(self) -> DatabaseConnectionConfig:
        """Returns the database if there is only one, useful for e.g. tests
        """
        """Returns the database if there is only one, useful for e.g. tests"""
        if not self.databases:
            raise Exception("More than one database exists")

@@ -166,6 +166,11 @@ class EmailConfig(Config):
            if not self.email_notif_from:
                missing.append("email.notif_from")

            # public_baseurl is required to build password reset and validation links that
            # will be emailed to users
            if config.get("public_baseurl") is None:
                missing.append("public_baseurl")

            if missing:
                raise ConfigError(
                    MISSING_PASSWORD_RESET_CONFIG_ERROR % (", ".join(missing),)
@@ -264,6 +269,9 @@ class EmailConfig(Config):
            if not self.email_notif_from:
                missing.append("email.notif_from")

            if config.get("public_baseurl") is None:
                missing.append("public_baseurl")

            if missing:
                raise ConfigError(
                    "email.enable_notifs is True but required keys are missing: %s"
@@ -281,7 +289,8 @@ class EmailConfig(Config):
                self.email_notif_template_html,
                self.email_notif_template_text,
            ) = self.read_templates(
                [notif_template_html, notif_template_text], template_dir,
                [notif_template_html, notif_template_text],
                template_dir,
            )

            self.email_notif_for_new_users = email_config.get(
@@ -303,7 +312,8 @@ class EmailConfig(Config):
                self.account_validity_template_html,
                self.account_validity_template_text,
            ) = self.read_templates(
                [expiry_template_html, expiry_template_text], template_dir,
                [expiry_template_html, expiry_template_text],
                template_dir,
            )

        subjects_config = email_config.get("subjects", {})

@@ -41,6 +41,10 @@ class FederationConfig(Config):
        )
        self.federation_metrics_domains = set(federation_metrics_domains)

        self.allow_profile_lookup_over_federation = config.get(
            "allow_profile_lookup_over_federation", True
        )

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
        ## Federation ##
@@ -66,6 +70,12 @@ class FederationConfig(Config):
        #federation_metrics_domains:
        #  - matrix.org
        #  - example.com

        # Uncomment to disable profile lookup over federation. By default, the
        # Federation API allows other homeservers to obtain profile data of any user
        # on this homeserver. Defaults to 'true'.
        #
        #allow_profile_lookup_over_federation: false
        """

@@ -21,8 +21,10 @@ import threading
from string import Template

import yaml
from zope.interface import implementer

from twisted.logger import (
    ILogObserver,
    LogBeginner,
    STDLibLogObserver,
    eventAsText,
@@ -162,7 +164,10 @@ class LoggingConfig(Config):
        )

        logging_group.add_argument(
            "-f", "--log-file", dest="log_file", help=argparse.SUPPRESS,
            "-f",
            "--log-file",
            dest="log_file",
            help=argparse.SUPPRESS,
        )

    def generate_files(self, config, config_dir_path):
@@ -224,7 +229,8 @@ def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) ->

    threadlocal = threading.local()

    def _log(event):
    @implementer(ILogObserver)
    def _log(event: dict) -> None:
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

@@ -15,7 +15,7 @@
# limitations under the License.

from collections import Counter
from typing import Iterable, Optional, Tuple, Type
from typing import Iterable, Mapping, Optional, Tuple, Type

import attr

@@ -25,7 +25,7 @@ from synapse.types import Collection, JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri

from ._base import Config, ConfigError
from ._base import Config, ConfigError, read_file

DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"

@@ -53,7 +53,10 @@ class OIDCConfig(Config):
                "Multiple OIDC providers have the idp_id %r." % idp_id
            )

        self.oidc_callback_url = self.public_baseurl + "_synapse/client/oidc/callback"
        public_baseurl = self.public_baseurl
        if public_baseurl is None:
            raise ConfigError("oidc_config requires a public_baseurl to be set")
        self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback"

    @property
    def oidc_enabled(self) -> bool:
@@ -94,7 +97,26 @@ class OIDCConfig(Config):
        #
        #   client_id: Required. oauth2 client id to use.
        #
        #   client_secret: Required. oauth2 client secret to use.
        #   client_secret: oauth2 client secret to use. May be omitted if
        #       client_secret_jwt_key is given, or if client_auth_method is 'none'.
        #
        #   client_secret_jwt_key: Alternative to client_secret: details of a key used
        #      to create a JSON Web Token to be used as an OAuth2 client secret. If
        #      given, must be a dictionary with the following properties:
        #
        #          key: a pem-encoded signing key. Must be a suitable key for the
        #              algorithm specified. Required unless 'key_file' is given.
        #
        #          key_file: the path to file containing a pem-encoded signing key file.
        #              Required unless 'key' is given.
        #
        #          jwt_header: a dictionary giving properties to include in the JWT
        #              header. Must include the key 'alg', giving the algorithm used to
        #              sign the JWT, such as "ES256", using the JWA identifiers in
        #              RFC7518.
        #
        #          jwt_payload: an optional dictionary giving properties to include in
        #              the JWT payload. Normally this should include an 'iss' key.
        #
        #   client_auth_method: auth method to use when exchanging the token. Valid
        #       values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -198,9 +220,9 @@ class OIDCConfig(Config):
        #    user_mapping_provider:
        #      config:
        #        subject_claim: "id"
        #        localpart_template: "{{ user.login }}"
        #        display_name_template: "{{ user.name }}"
        #        email_template: "{{ user.email }}"
        #        localpart_template: "{{{{ user.login }}}}"
        #        display_name_template: "{{{{ user.name }}}}"
        #        email_template: "{{{{ user.email }}}}"

        # For use with Keycloak
        #
@@ -215,7 +237,7 @@ class OIDCConfig(Config):
        #
        #- idp_id: github
        #  idp_name: Github
        #  idp_brand: org.matrix.github
        #  idp_brand: github
        #  discover: false
        #  issuer: "https://github.com/"
        #  client_id: "your-client-id"  # TO BE FILLED
@@ -227,8 +249,8 @@ class OIDCConfig(Config):
        #  user_mapping_provider:
        #    config:
        #      subject_claim: "id"
        #      localpart_template: "{{ user.login }}"
        #      display_name_template: "{{ user.name }}"
        #      localpart_template: "{{{{ user.login }}}}"
        #      display_name_template: "{{{{ user.name }}}}"
        """.format(
            mapping_provider=DEFAULT_USER_MAPPING_PROVIDER
        )
@@ -237,7 +259,7 @@ class OIDCConfig(Config):
# jsonschema definition of the configuration settings for an oidc identity provider
OIDC_PROVIDER_CONFIG_SCHEMA = {
    "type": "object",
    "required": ["issuer", "client_id", "client_secret"],
    "required": ["issuer", "client_id"],
    "properties": {
        "idp_id": {
            "type": "string",
@@ -250,7 +272,12 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "idp_icon": {"type": "string"},
        "idp_brand": {
            "type": "string",
            # MSC2758-style namespaced identifier
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
        },
        "idp_unstable_brand": {
            "type": "string",
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
@@ -259,6 +286,30 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "issuer": {"type": "string"},
        "client_id": {"type": "string"},
        "client_secret": {"type": "string"},
        "client_secret_jwt_key": {
            "type": "object",
            "required": ["jwt_header"],
            "oneOf": [
                {"required": ["key"]},
                {"required": ["key_file"]},
            ],
            "properties": {
                "key": {"type": "string"},
                "key_file": {"type": "string"},
                "jwt_header": {
                    "type": "object",
                    "required": ["alg"],
                    "properties": {
                        "alg": {"type": "string"},
                    },
                    "additionalProperties": {"type": "string"},
                },
                "jwt_payload": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
        },
        "client_auth_method": {
            "type": "string",
            # the following list is the same as the keys of

@@ -352,9 +403,10 @@ def _parse_oidc_config_dict(
    ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
    ump_config.setdefault("config", {})

    (user_mapping_provider_class, user_mapping_provider_config,) = load_module(
        ump_config, config_path + ("user_mapping_provider",)
    )
    (
        user_mapping_provider_class,
        user_mapping_provider_config,
    ) = load_module(ump_config, config_path + ("user_mapping_provider",))

    # Ensure loaded user mapping module has defined all necessary methods
    required_methods = [
@@ -369,7 +421,11 @@ def _parse_oidc_config_dict(
    if missing_methods:
        raise ConfigError(
            "Class %s is missing required "
            "methods: %s" % (user_mapping_provider_class, ", ".join(missing_methods),),
            "methods: %s"
            % (
                user_mapping_provider_class,
                ", ".join(missing_methods),
            ),
            config_path + ("user_mapping_provider", "module"),
        )

@@ -396,15 +452,31 @@ def _parse_oidc_config_dict(
            "idp_icon must be a valid MXC URI", config_path + ("idp_icon",)
        ) from e

    client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
    client_secret_jwt_key = None  # type: Optional[OidcProviderClientSecretJwtKey]
    if client_secret_jwt_key_config is not None:
        keyfile = client_secret_jwt_key_config.get("key_file")
        if keyfile:
            key = read_file(keyfile, config_path + ("client_secret_jwt_key",))
        else:
            key = client_secret_jwt_key_config["key"]
        client_secret_jwt_key = OidcProviderClientSecretJwtKey(
            key=key,
            jwt_header=client_secret_jwt_key_config["jwt_header"],
            jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
        )

    return OidcProviderConfig(
        idp_id=idp_id,
        idp_name=oidc_config.get("idp_name", "OIDC"),
        idp_icon=idp_icon,
        idp_brand=oidc_config.get("idp_brand"),
        unstable_idp_brand=oidc_config.get("unstable_idp_brand"),
        discover=oidc_config.get("discover", True),
        issuer=oidc_config["issuer"],
        client_id=oidc_config["client_id"],
        client_secret=oidc_config["client_secret"],
        client_secret=oidc_config.get("client_secret"),
        client_secret_jwt_key=client_secret_jwt_key,
        client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
        scopes=oidc_config.get("scopes", ["openid"]),
        authorization_endpoint=oidc_config.get("authorization_endpoint"),
@@ -419,6 +491,18 @@ def _parse_oidc_config_dict(
    )


@attr.s(slots=True, frozen=True)
class OidcProviderClientSecretJwtKey:
    # a pem-encoded signing key
    key = attr.ib(type=str)

    # properties to include in the JWT header
    jwt_header = attr.ib(type=Mapping[str, str])

    # properties to include in the JWT payload.
    jwt_payload = attr.ib(type=Mapping[str, str])

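The parsing above gives `client_secret_jwt_key` precedence handling: key material comes inline (`key`) or from `key_file`, and `client_secret` becomes optional. The selection logic reduced to a sketch (the dict shape follows the schema above; function and variable names are illustrative):

```python
from typing import Any, Mapping, Optional, Tuple


def resolve_client_secret(
    oidc_config: Mapping[str, Any],
) -> Tuple[Optional[str], Optional[Mapping[str, Any]]]:
    """Return (static_secret, jwt_key_config), giving the JWT key priority."""
    jwt_key_config = oidc_config.get("client_secret_jwt_key")
    if jwt_key_config is not None:
        if jwt_key_config.get("key_file"):
            # Key material may live on disk instead of inline in the config.
            with open(jwt_key_config["key_file"]) as f:
                key = f.read()
        else:
            key = jwt_key_config["key"]
        return None, {
            "key": key,
            "jwt_header": jwt_key_config["jwt_header"],
            "jwt_payload": jwt_key_config.get("jwt_payload", {}),
        }
    return oidc_config.get("client_secret"), None


secret, jwt_key = resolve_client_secret(
    {"client_secret_jwt_key": {"key": "---PEM---", "jwt_header": {"alg": "ES256"}}}
)
print(secret, jwt_key["jwt_header"])  # None {'alg': 'ES256'}
```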
@attr.s(slots=True, frozen=True)
class OidcProviderConfig:
    # a unique identifier for this identity provider. Used in the 'user_external_ids'
@@ -434,6 +518,9 @@ class OidcProviderConfig:
    # Optional brand identifier for this IdP.
    idp_brand = attr.ib(type=Optional[str])

    # Optional brand identifier for the unstable API (see MSC2858).
    unstable_idp_brand = attr.ib(type=Optional[str])

    # whether the OIDC discovery mechanism is used to discover endpoints
    discover = attr.ib(type=bool)

@@ -444,8 +531,13 @@ class OidcProviderConfig:
    # oauth2 client id to use
    client_id = attr.ib(type=str)

    # oauth2 client secret to use
    client_secret = attr.ib(type=str)
    # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate
    # a secret.
    client_secret = attr.ib(type=Optional[str])

    # key to use to construct a JWT to use as a client secret. May be `None` if
    # `client_secret` is set.
    client_secret_jwt_key = attr.ib(type=Optional[OidcProviderClientSecretJwtKey])

    # auth method to use when exchanging the token.
    # Valid values are 'client_secret_basic', 'client_secret_post' and

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ShardedWorkerHandlingConfig
from ._base import Config


class PushConfig(Config):
@@ -27,9 +27,6 @@ class PushConfig(Config):
            "group_unread_count_by_room", True
        )

        pusher_instances = config.get("pusher_instances") or []
        self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)

        # There was a a 'redact_content' setting but mistakenly read from the
        # 'email'section'. Check for the flag in the 'push' section, and log,
        # but do not honour it to avoid nasty surprises when people upgrade.

@@ -102,6 +102,16 @@ class RatelimitConfig(Config):
            defaults={"per_second": 0.01, "burst_count": 3},
        )

        # Ratelimit cross-user key requests:
        # * For local requests this is keyed by the sending device.
        # * For requests received over federation this is keyed by the origin.
        #
        # Note that this isn't exposed in the configuration as it is obscure.
        self.rc_key_requests = RateLimitConfig(
            config.get("rc_key_requests", {}),
            defaults={"per_second": 20, "burst_count": 100},
        )

        self.rc_3pid_validation = RateLimitConfig(
            config.get("rc_3pid_validation") or {},
            defaults={"per_second": 0.003, "burst_count": 5},

@@ -49,6 +49,10 @@ class AccountValidityConfig(Config):
|
||||
|
||||
self.startup_job_max_delta = self.period * 10.0 / 100.0
|
||||
|
||||
if self.renew_by_email_enabled:
|
||||
if "public_baseurl" not in synapse_config:
|
||||
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
|
||||
|
||||
template_dir = config.get("template_dir")
|
||||
|
||||
if not template_dir:
|
||||
@@ -105,6 +109,13 @@ class RegistrationConfig(Config):
|
||||
account_threepid_delegates = config.get("account_threepid_delegates") or {}
|
||||
self.account_threepid_delegate_email = account_threepid_delegates.get("email")
|
||||
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
|
||||
if self.account_threepid_delegate_msisdn and not self.public_baseurl:
|
||||
raise ConfigError(
|
||||
"The configuration option `public_baseurl` is required if "
|
||||
"`account_threepid_delegate.msisdn` is set, such that "
|
||||
"clients know where to submit validation tokens to. Please "
|
||||
"configure `public_baseurl`."
|
||||
)
|
||||
|
||||
self.default_identity_server = config.get("default_identity_server")
|
||||
self.allow_guest_access = config.get("allow_guest_access", False)
|
||||
@@ -227,9 +238,8 @@ class RegistrationConfig(Config):
|
||||
# send an email to the account's email address with a renewal link. By
|
||||
# default, no such emails are sent.
|
||||
#
|
||||
# If you enable this setting, you will also need to fill out the 'email'
|
||||
# configuration section. You should also check that 'public_baseurl' is set
|
||||
# correctly.
|
||||
# If you enable this setting, you will also need to fill out the 'email' and
|
||||
# 'public_baseurl' configuration sections.
|
||||
#
|
||||
#renew_at: 1w
|
||||
|
||||
@@ -320,7 +330,8 @@ class RegistrationConfig(Config):
|
||||
# The identity server which we suggest that clients should use when users log
|
||||
# in on this server.
|
||||
#
|
||||
# (By default, no suggestion is made, so it is left up to the client.)
|
||||
# (By default, no suggestion is made, so it is left up to the client.
|
||||
# This setting is ignored unless public_baseurl is also set.)
|
||||
#
|
||||
#default_identity_server: https://matrix.org
|
||||
|
||||
@@ -345,6 +356,8 @@ class RegistrationConfig(Config):
        # by the Matrix Identity Service API specification:
        # https://matrix.org/docs/spec/identity_service/latest
        #
        # If a delegate is specified, the config option public_baseurl must also be filled out.
        #
        account_threepid_delegates:
            #email: https://example.com     # Delegate email sending to example.com
            #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
@@ -378,6 +391,8 @@ class RegistrationConfig(Config):
        # By default, any room aliases included in this list will be created
        # as a publicly joinable room when the first user registers for the
        # homeserver. This behaviour can be customised with the settings below.
        # If the room already exists, make certain it is a publicly joinable
        # room. The join rule of the room must be set to 'public'.
        #
        #auto_join_rooms:
        #  - "#example:example.com"
@@ -17,9 +17,7 @@ import os
from collections import namedtuple
from typing import Dict, List

from netaddr import IPSet

from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST
from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module
@@ -54,7 +52,7 @@ MediaStorageProviderConfig = namedtuple(


def parse_thumbnail_requirements(thumbnail_sizes):
    """ Takes a list of dictionaries with "width", "height", and "method" keys
    """Takes a list of dictionaries with "width", "height", and "method" keys
    and creates a map from image media types to the thumbnail size, thumbnailing
    method, and thumbnail media type to precalculate
@@ -187,16 +185,17 @@ class ContentRepositoryConfig(Config):
                    "to work"
                )

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"]
            )

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(["0.0.0.0", "::"])
            self.url_preview_ip_range_blacklist = generate_ip_set(
                config["url_preview_ip_range_blacklist"],
                ["0.0.0.0", "::"],
                config_path=("url_preview_ip_range_blacklist",),
            )

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ())
            self.url_preview_ip_range_whitelist = generate_ip_set(
                config.get("url_preview_ip_range_whitelist", ()),
                config_path=("url_preview_ip_range_whitelist",),
            )

            self.url_preview_url_blacklist = config.get("url_preview_url_blacklist", ())
@@ -207,7 +206,6 @@ class ContentRepositoryConfig(Config):

    def generate_config_section(self, data_dir_path, **kwargs):
        media_store = os.path.join(data_dir_path, "media_store")
        uploads_path = os.path.join(data_dir_path, "uploads")

        formatted_thumbnail_sizes = "".join(
            THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES
@@ -123,7 +123,7 @@ class RoomDirectoryConfig(Config):
            alias (str)

        Returns:
            boolean: True if user is allowed to crate the alias
            boolean: True if user is allowed to create the alias
        """
        for rule in self._alias_creation_rules:
            if rule.matches(user_id, room_id, [alias]):
@@ -17,8 +17,7 @@
import logging
from typing import Any, List

import attr

from synapse.config.sso import SsoAttributeRequirement
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module, load_python_module
@@ -189,6 +188,8 @@ class SAML2Config(Config):
        import saml2

        public_baseurl = self.public_baseurl
        if public_baseurl is None:
            raise ConfigError("saml2_config requires a public_baseurl to be set")

        if self.saml2_grandfathered_mxid_source_attribute:
            optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
@@ -396,32 +397,18 @@ class SAML2Config(Config):
    }


@attr.s(frozen=True)
class SamlAttributeRequirement:
    """Object describing a single requirement for SAML attributes."""

    attribute = attr.ib(type=str)
    value = attr.ib(type=str)

    JSON_SCHEMA = {
        "type": "object",
        "properties": {"attribute": {"type": "string"}, "value": {"type": "string"}},
        "required": ["attribute", "value"],
    }


ATTRIBUTE_REQUIREMENTS_SCHEMA = {
    "type": "array",
    "items": SamlAttributeRequirement.JSON_SCHEMA,
    "items": SsoAttributeRequirement.JSON_SCHEMA,
}


def _parse_attribute_requirements_def(
    attribute_requirements: Any,
) -> List[SamlAttributeRequirement]:
) -> List[SsoAttributeRequirement]:
    validate_config(
        ATTRIBUTE_REQUIREMENTS_SCHEMA,
        attribute_requirements,
        config_path=["saml2_config", "attribute_requirements"],
        config_path=("saml2_config", "attribute_requirements"),
    )
    return [SamlAttributeRequirement(**x) for x in attribute_requirements]
    return [SsoAttributeRequirement(**x) for x in attribute_requirements]

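For reference, the input this parser expects mirrors the YAML shape of saml2_config.attribute_requirements. A minimal sketch, assuming the definitions above are in scope (the attribute names and values here are invented for the example):

requirements = _parse_attribute_requirements_def(
    [
        {"attribute": "userGroup", "value": "staff"},
        {"attribute": "department", "value": "sales"},
    ]
)
# Each entry is validated against ATTRIBUTE_REQUIREMENTS_SCHEMA and then
# turned into an SsoAttributeRequirement instance.
assert all(isinstance(r, SsoAttributeRequirement) for r in requirements)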
@@ -15,6 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import logging
import os.path
import re
@@ -23,7 +24,7 @@ from typing import Any, Dict, Iterable, List, Optional, Set

import attr
import yaml
from netaddr import IPSet
from netaddr import AddrFormatError, IPNetwork, IPSet

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.util.stringutils import parse_and_validate_server_name
@@ -40,6 +41,71 @@ logger = logging.Logger(__name__)
# in the list.
DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]


def _6to4(network: IPNetwork) -> IPNetwork:
    """Convert an IPv4 network into a 6to4 IPv6 network per RFC 3056."""

    # 6to4 networks consist of:
    # * 2002 as the first 16 bits
    # * The first IPv4 address in the network hex-encoded as the next 32 bits
    # * The new prefix length needs to include the bits from the 2002 prefix.
    hex_network = hex(network.first)[2:]
    hex_network = ("0" * (8 - len(hex_network))) + hex_network
    return IPNetwork(
        "2002:%s:%s::/%d"
        % (
            hex_network[:4],
            hex_network[4:],
            16 + network.prefixlen,
        )
    )


def generate_ip_set(
    ip_addresses: Optional[Iterable[str]],
    extra_addresses: Optional[Iterable[str]] = None,
    config_path: Optional[Iterable[str]] = None,
) -> IPSet:
    """
    Generate an IPSet from a list of IP addresses or CIDRs.

    Additionally, for each IPv4 network in the list of IP addresses, also
    includes the corresponding IPv6 networks.

    This includes:

    * IPv4-Compatible IPv6 Address (see RFC 4291, section 2.5.5.1)
    * IPv4-Mapped IPv6 Address (see RFC 4291, section 2.5.5.2)
    * 6to4 Address (see RFC 3056, section 2)

    Args:
        ip_addresses: An iterable of IP addresses or CIDRs.
        extra_addresses: An iterable of IP addresses or CIDRs.
        config_path: The path in the configuration for error messages.

    Returns:
        A new IP set.
    """
    result = IPSet()
    for ip in itertools.chain(ip_addresses or (), extra_addresses or ()):
        try:
            network = IPNetwork(ip)
        except AddrFormatError as e:
            raise ConfigError(
                "Invalid IP range provided: %s." % (ip,), config_path
            ) from e
        result.add(network)

        # It is possible that these already exist in the set, but that's OK.
        if ":" not in str(network):
            result.add(IPNetwork(network).ipv6(ipv4_compatible=True))
            result.add(IPNetwork(network).ipv6(ipv4_compatible=False))
            result.add(_6to4(network))

    return result

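A worked example of the two functions above (illustrative, assuming the definitions above are in scope). For 10.0.0.0/8, network.first is 0x0A000000, so hex_network becomes "0a000000" and the resulting 6to4 network is 2002:0a00::/24 (the 16 "2002" prefix bits plus the original 8). generate_ip_set then makes a single IPv4 range also cover its IPv6 aliases:

from netaddr import IPNetwork

assert hex(IPNetwork("10.0.0.0/8").first) == "0xa000000"

blacklist = generate_ip_set(["10.0.0.0/8"], ["0.0.0.0", "::"])
assert "10.1.2.3" in blacklist                       # plain IPv4
assert "::ffff:10.1.2.3" in blacklist                # IPv4-mapped IPv6
assert IPNetwork("2002:0a01:0203::/48") in blacklist  # 6to4 equivalent

This closes a class of SSRF bypasses where a blacklisted IPv4 range could previously be reached via its IPv6 representations.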
# IP ranges that are considered private / unroutable / don't make sense.
DEFAULT_IP_RANGE_BLACKLIST = [
    # Localhost
    "127.0.0.0/8",
@@ -53,6 +119,8 @@ DEFAULT_IP_RANGE_BLACKLIST = [
    "192.0.0.0/24",
    # Link-local networks.
    "169.254.0.0/16",
    # Formerly used for 6to4 relay.
    "192.88.99.0/24",
    # Testing networks.
    "198.18.0.0/15",
    "192.0.2.0/24",
@@ -66,6 +134,12 @@ DEFAULT_IP_RANGE_BLACKLIST = [
    "fe80::/10",
    # Unique local addresses.
    "fc00::/7",
    # Testing networks.
    "2001:db8::/32",
    # Multicast.
    "ff00::/8",
    # Site-local addresses
    "fec0::/10",
]

DEFAULT_ROOM_VERSION = "6"
@@ -161,11 +235,7 @@ class ServerConfig(Config):
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl") or "https://%s/" % (
            self.server_name,
        )
        if self.public_baseurl[-1] != "/":
            self.public_baseurl += "/"
        self.public_baseurl = config.get("public_baseurl")

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)
@@ -189,7 +259,14 @@ class ServerConfig(Config):
        # Whether to require sharing a room with a user to retrieve their
        # profile data
        self.limit_profile_requests_to_users_who_share_rooms = config.get(
            "limit_profile_requests_to_users_who_share_rooms", False,
            "limit_profile_requests_to_users_who_share_rooms",
            False,
        )

        # Whether to retrieve and display profile data for a user when they
        # are invited to a room
        self.include_profile_data_on_invite = config.get(
            "include_profile_data_on_invite", True
        )

        if "restrict_public_rooms_to_local_users" in config and (
@@ -294,17 +371,15 @@ class ServerConfig(Config):
        )

        # Attempt to create an IPSet from the given ranges
        try:
            self.ip_range_blacklist = IPSet(ip_range_blacklist)
        except Exception as e:
            raise ConfigError("Invalid range(s) provided in ip_range_blacklist.") from e
        # Always blacklist 0.0.0.0, ::
        self.ip_range_blacklist.update(["0.0.0.0", "::"])

        try:
            self.ip_range_whitelist = IPSet(config.get("ip_range_whitelist", ()))
        except Exception as e:
            raise ConfigError("Invalid range(s) provided in ip_range_whitelist.") from e
        # Always blacklist 0.0.0.0, ::
        self.ip_range_blacklist = generate_ip_set(
            ip_range_blacklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",)
        )

        self.ip_range_whitelist = generate_ip_set(
            config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",)
        )

        # The federation_ip_range_blacklist is used for backwards-compatibility
        # and only applies to federation and identity servers. If it is not given,
@@ -312,16 +387,16 @@ class ServerConfig(Config):
        federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", ip_range_blacklist
        )
        try:
            self.federation_ip_range_blacklist = IPSet(federation_ip_range_blacklist)
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in federation_ip_range_blacklist."
            ) from e
        # Always blacklist 0.0.0.0, ::
        self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        self.federation_ip_range_blacklist = generate_ip_set(
            federation_ip_range_blacklist,
            ["0.0.0.0", "::"],
            config_path=("federation_ip_range_blacklist",),
        )

        self.start_pushers = config.get("start_pushers", True)
        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != "/":
                self.public_baseurl += "/"
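A minimal sketch of the new public_baseurl behaviour introduced here (illustrative function, not part of the diff): the setting no longer defaults to https://<server_name>/, it stays None unless configured, and the trailing slash is only normalised when a value is present.

def normalise_public_baseurl(configured):
    # Mirrors the logic above: leave unset values as None, otherwise
    # guarantee exactly one trailing slash.
    if configured is not None and configured[-1] != "/":
        return configured + "/"
    return configured

assert normalise_public_baseurl(None) is None
assert normalise_public_baseurl("https://example.com") == "https://example.com/"
assert normalise_public_baseurl("https://example.com/") == "https://example.com/"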

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
@@ -550,7 +625,9 @@ class ServerConfig(Config):
        if manhole:
            self.listeners.append(
                ListenerConfig(
                    port=manhole, bind_addresses=["127.0.0.1"], type="manhole",
                    port=manhole,
                    bind_addresses=["127.0.0.1"],
                    type="manhole",
                )
            )
@@ -586,7 +663,8 @@ class ServerConfig(Config):
        # and letting the client know which email address is bound to an account and
        # which one isn't.
        self.request_token_inhibit_3pid_errors = config.get(
            "request_token_inhibit_3pid_errors", False,
            "request_token_inhibit_3pid_errors",
            False,
        )

        # List of users trialing the new experimental default push rules. This setting is
@@ -748,10 +826,6 @@ class ServerConfig(Config):
        # Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
        # 'listeners' below).
        #
        # If this is left unset, it defaults to 'https://<server_name>/'. (Note that
        # that will not work unless you configure Synapse or a reverse-proxy to listen
        # on port 443.)
        #
        #public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use
@@ -767,8 +841,7 @@ class ServerConfig(Config):
        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        # API, unless allow_profile_lookup_over_federation is set to false.
        #
        #require_auth_for_profile_requests: true
@@ -779,6 +852,14 @@ class ServerConfig(Config):
        #
        #limit_profile_requests_to_users_who_share_rooms: true

        # Uncomment to prevent a user's profile data from being retrieved and
        # displayed in a room until they have joined it. By default, a user's
        # profile data is included in an invite event, regardless of the values
        # of the above two settings, and whether or not the users share a server.
        # Defaults to 'true'.
        #
        #include_profile_data_on_invite: false

        # If set to 'true', removes the need for authentication to access the server's
        # public rooms directory through the client API, meaning that anyone can
        # query the room directory. Defaults to 'false'.
@@ -12,14 +12,30 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from typing import Any, Dict, Optional

import attr

from ._base import Config


@attr.s(frozen=True)
class SsoAttributeRequirement:
    """Object describing a single requirement for SSO attributes."""

    attribute = attr.ib(type=str)
    # If a value is not given, then the attribute must simply exist.
    value = attr.ib(type=Optional[str])

    JSON_SCHEMA = {
        "type": "object",
        "properties": {"attribute": {"type": "string"}, "value": {"type": "string"}},
        "required": ["attribute", "value"],
    }


class SSOConfig(Config):
    """SSO Configuration
    """
    """SSO Configuration"""

    section = "sso"

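Illustrative use of the class above: one requirement where an SSO attribute must hold a specific value, and one where the attribute merely has to be present (value=None). The attribute names, values, and the helper function are invented for the example and are not part of this diff:

must_be_staff = SsoAttributeRequirement(attribute="userGroup", value="staff")
must_have_email = SsoAttributeRequirement(attribute="email", value=None)

def attribute_matches(req: SsoAttributeRequirement, attributes: dict) -> bool:
    # Hypothetical check of one requirement against a flat dict of
    # attributes returned by the identity provider.
    if req.attribute not in attributes:
        return False
    return req.value is None or attributes[req.attribute] == req.value

assert attribute_matches(must_have_email, {"email": "alice@example.com"})
assert not attribute_matches(must_be_staff, {"userGroup": "guests"})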
@@ -64,8 +80,11 @@ class SSOConfig(Config):
        # gracefully to the client). This would make it pointless to ask the user for
        # confirmation, since the URL the confirmation page would be showing wouldn't be
        # the client's.
        login_fallback_url = self.public_baseurl + "_matrix/static/client/login"
        self.sso_client_whitelist.append(login_fallback_url)
        # public_baseurl is an optional setting, so we only add the fallback's URL to the
        # list if it's provided (because we can't figure out what that URL is otherwise).
        if self.public_baseurl:
            login_fallback_url = self.public_baseurl + "_matrix/static/client/login"
            self.sso_client_whitelist.append(login_fallback_url)

    def generate_config_section(self, **kwargs):
        return """\
@@ -83,9 +102,9 @@ class SSOConfig(Config):
        # phishing attacks from evil.site. To avoid this, include a slash after the
        # hostname: "https://my.client/".
        #
        # The login fallback page (used by clients that don't natively support the
        # required login flows) is automatically whitelisted in addition to any URLs
        # in this list.
        # If public_baseurl is set, then the login fallback page (used by clients
        # that don't natively support the required login flows) is whitelisted in
        # addition to any URLs in this list.
        #
        # By default, this list is empty.
        #
@@ -106,8 +125,7 @@ class SSOConfig(Config):
        #
        # When rendering, this template is given the following variables:
        #     * redirect_url: the URL that the user will be redirected to after
        #       login. Needs manual escaping (see
        #       https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
        #       login.
        #
        #     * server_name: the homeserver's name.
        #
@@ -185,15 +203,12 @@ class SSOConfig(Config):
        #
        # When rendering, this template is given the following variables:
        #
        #     * redirect_url: the URL the user is about to be redirected to. Needs
        #       manual escaping (see
        #       https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
        #     * redirect_url: the URL the user is about to be redirected to.
        #
        #     * display_url: the same as `redirect_url`, but with the query
        #                    parameters stripped. The intention is to have a
        #                    human-readable URL to show to users, not to use it as
        #                    the final address to redirect to. Needs manual escaping
        #                    (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
        #                    the final address to redirect to.
        #
        #     * server_name: the homeserver's name.
        #
@@ -213,9 +228,7 @@ class SSOConfig(Config):
        # process: 'sso_auth_confirm.html'.
        #
        # When rendering, this template is given the following variables:
        #     * redirect_url: the URL the user is about to be redirected to. Needs
        #       manual escaping (see
        #       https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
        #     * redirect_url: the URL the user is about to be redirected to.
        #
        #     * description: the operation which the user is being asked to confirm
        #
@@ -13,10 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import logging

from ._base import Config

ROOM_STATS_DISABLED_WARN = """\
WARNING: room/user statistics have been disabled via the stats.enabled
configuration setting. This means that certain features (such as the room
directory) will not operate correctly. Future versions of Synapse may ignore
this setting.

To fix this warning, remove the stats.enabled setting from your configuration
file.
--------------------------------------------------------------------------------"""

logger = logging.getLogger(__name__)


class StatsConfig(Config):
    """Stats Configuration
@@ -28,30 +40,29 @@ class StatsConfig(Config):
    def read_config(self, config, **kwargs):
        self.stats_enabled = True
        self.stats_bucket_size = 86400 * 1000
        self.stats_retention = sys.maxsize
        stats_config = config.get("stats", None)
        if stats_config:
            self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
            self.stats_bucket_size = self.parse_duration(
                stats_config.get("bucket_size", "1d")
            )
            self.stats_retention = self.parse_duration(
                stats_config.get("retention", "%ds" % (sys.maxsize,))
            )
        if not self.stats_enabled:
            logger.warning(ROOM_STATS_DISABLED_WARN)

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """
        # Local statistics collection. Used in populating the room directory.
        # Settings for local room and user statistics collection. See
        # docs/room_and_user_statistics.md.
        #
        # 'bucket_size' controls how large each statistics timeslice is. It can
        # be defined in a human readable short form -- e.g. "1d", "1y".
        #
        # 'retention' controls how long historical statistics will be kept for.
        # It can be defined in a human readable short form -- e.g. "1d", "1y".
        #
        #
        #stats:
        #   enabled: true
        #   bucket_size: 1d
        #   retention: 1y
        stats:
          # Uncomment the following to disable room and user statistics. Note that doing
          # so may cause certain features (such as the room directory) not to work
          # correctly.
          #
          #enabled: false

          # The size of each timeslice in the room_stats_historical and
          # user_stats_historical tables, as a time period. Defaults to "1d".
          #
          #bucket_size: 1h
        """
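The new code replaces the hard-coded 86400 * 1000 default with parse_duration("1d"). A simplified sketch of the duration strings that parser accepts (illustrative re-implementation; the real parser lives on the Config base class in synapse.config._base):

SECOND_MS = 1000
UNITS_MS = {
    "s": SECOND_MS,
    "m": 60 * SECOND_MS,
    "h": 60 * 60 * SECOND_MS,
    "d": 24 * 60 * 60 * SECOND_MS,
    "w": 7 * 24 * 60 * 60 * SECOND_MS,
    "y": 365 * 24 * 60 * 60 * SECOND_MS,
}

def parse_duration_ms(value) -> int:
    if isinstance(value, int):
        return value  # already milliseconds
    return int(value[:-1]) * UNITS_MS[value[-1]]

assert parse_duration_ms("1d") == 86400 * 1000  # matches the old hard-coded default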
@@ -24,32 +24,46 @@ class UserDirectoryConfig(Config):
    section = "userdirectory"

    def read_config(self, config, **kwargs):
        self.user_directory_search_enabled = True
        self.user_directory_search_all_users = False
        user_directory_config = config.get("user_directory", None)
        if user_directory_config:
            self.user_directory_search_enabled = user_directory_config.get(
                "enabled", True
            )
            self.user_directory_search_all_users = user_directory_config.get(
                "search_all_users", False
            )
        user_directory_config = config.get("user_directory") or {}
        self.user_directory_search_enabled = user_directory_config.get("enabled", True)
        self.user_directory_search_all_users = user_directory_config.get(
            "search_all_users", False
        )
        self.user_directory_search_prefer_local_users = user_directory_config.get(
            "prefer_local_users", False
        )

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """
        # User Directory configuration
        #
        # 'enabled' defines whether users can search the user directory. If
        # false then empty responses are returned to all queries. Defaults to
        # true.
        #
        # 'search_all_users' defines whether to search all users visible to your HS
        # when searching the user directory, rather than limiting to users visible
        # in public rooms. Defaults to false. If you set it True, you'll have to
        # rebuild the user_directory search indexes, see
        # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
        #
        #user_directory:
        #  enabled: true
        #  search_all_users: false
        user_directory:
            # Defines whether users can search the user directory. If false then
            # empty responses are returned to all queries. Defaults to true.
            #
            # Uncomment to disable the user directory.
            #
            #enabled: false

            # Defines whether to search all users visible to your HS when searching
            # the user directory, rather than limiting to users visible in public
            # rooms. Defaults to false.
            #
            # If you set it true, you'll have to rebuild the user_directory search
            # indexes, see:
            # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
            #
            # Uncomment to return search results containing all known users, even if that
            # user does not share a room with the requester.
            #
            #search_all_users: true

            # Defines whether to prefer local users in search query results.
            # If True, local users are more likely to appear above remote users
            # when searching the user directory. Defaults to false.
            #
            # Uncomment to prefer local over remote users in user directory search
            # results.
            #
            #prefer_local_users: true
        """
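A note on the `config.get("user_directory") or {}` idiom adopted above, worth a two-line example because it is subtly different from passing a default to .get(): a YAML file containing a bare `user_directory:` key parses to None, and `.get(key, default)` only falls back when the key is absent, not when its value is None.

config_with_empty_section = {"user_directory": None}

assert config_with_empty_section.get("user_directory", {}) is None    # trap
assert (config_with_empty_section.get("user_directory") or {}) == {}  # safe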
@@ -17,9 +17,28 @@ from typing import List, Union

import attr

from ._base import Config, ConfigError, ShardedWorkerHandlingConfig
from ._base import (
    Config,
    ConfigError,
    RoutableShardedWorkerHandlingConfig,
    ShardedWorkerHandlingConfig,
)
from .server import ListenerConfig, parse_listener_def

_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main
synapse process before they can be run in a separate worker.

Please add ``send_federation: false`` to the main config
"""

_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
The start_pushers config option must be disabled in the main
synapse process before they can be run in a separate worker.

Please add ``start_pushers: false`` to the main config
"""


def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
    """Helper for allowing parsing a string or list of strings to a config
@@ -33,8 +52,7 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:

@attr.s
class InstanceLocationConfig:
    """The host and port to talk to an instance via HTTP replication.
    """
    """The host and port to talk to an instance via HTTP replication."""

    host = attr.ib(type=str)
    port = attr.ib(type=int)
@@ -54,13 +72,19 @@ class WriterLocations:
    )
    typing = attr.ib(default="master", type=str)
    to_device = attr.ib(
        default=["master"], type=List[str], converter=_instance_to_list_converter,
        default=["master"],
        type=List[str],
        converter=_instance_to_list_converter,
    )
    account_data = attr.ib(
        default=["master"], type=List[str], converter=_instance_to_list_converter,
        default=["master"],
        type=List[str],
        converter=_instance_to_list_converter,
    )
    receipts = attr.ib(
        default=["master"], type=List[str], converter=_instance_to_list_converter,
        default=["master"],
        type=List[str],
        converter=_instance_to_list_converter,
    )

@@ -98,6 +122,7 @@ class WorkerConfig(Config):
        self.worker_replication_secret = config.get("worker_replication_secret", None)

        self.worker_name = config.get("worker_name", self.worker_app)
        self.instance_name = self.worker_name or "master"

        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
@@ -107,16 +132,47 @@ class WorkerConfig(Config):
        if manhole:
            self.worker_listeners.append(
                ListenerConfig(
                    port=manhole, bind_addresses=["127.0.0.1"], type="manhole",
                    port=manhole,
                    bind_addresses=["127.0.0.1"],
                    type="manhole",
                )
            )

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)
        # Handle federation sender configuration.
        #
        # There are two ways of configuring which instances handle federation
        # sending:
        #   1. The old way where "send_federation" is set to false and running a
        #      `synapse.app.federation_sender` worker app.
        #   2. Specifying the workers sending federation in
        #      `federation_sender_instances`.
        #

        federation_sender_instances = config.get("federation_sender_instances") or []
        send_federation = config.get("send_federation", True)

        federation_sender_instances = config.get("federation_sender_instances")
        if federation_sender_instances is None:
            # Default to an empty list, which means "another, unknown, worker is
            # responsible for it".
            federation_sender_instances = []

            # If no federation sender instances are set we check if
            # `send_federation` is set, which means use master
            if send_federation:
                federation_sender_instances = ["master"]

            if self.worker_app == "synapse.app.federation_sender":
                if send_federation:
                    # If we're running federation senders, and not using
                    # `federation_sender_instances`, then we should have
                    # explicitly set `send_federation` to false.
                    raise ConfigError(
                        _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
                    )

                federation_sender_instances = [self.worker_name]

        self.send_federation = self.instance_name in federation_sender_instances
        self.federation_shard_config = ShardedWorkerHandlingConfig(
            federation_sender_instances
        )
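The resolution rules above (minus the federation_sender worker-app special case) reduce to a small pure function; a sketch under that assumption:

from typing import List, Optional

def resolve_federation_senders(
    send_federation: bool,
    federation_sender_instances: Optional[List[str]],
) -> List[str]:
    """An explicit federation_sender_instances list always wins; otherwise
    send_federation decides whether the master process does the sending."""
    if federation_sender_instances is not None:
        return federation_sender_instances
    return ["master"] if send_federation else []

# Legacy single-process setup: master sends federation.
assert resolve_federation_senders(True, None) == ["master"]
# Legacy worker setup: some other federation_sender worker is responsible.
assert resolve_federation_senders(False, None) == []
# New-style sharded setup: exactly the named workers send.
assert resolve_federation_senders(True, ["sender1", "sender2"]) == ["sender1", "sender2"]

Each instance then simply checks whether its own name is in the resulting list, which is what `self.instance_name in federation_sender_instances` does above.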
@@ -157,7 +213,37 @@ class WorkerConfig(Config):
                    "Must only specify one instance to handle `receipts` messages."
                )

        self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events)
        if len(self.writers.events) == 0:
            raise ConfigError("Must specify at least one instance to handle `events`.")

        self.events_shard_config = RoutableShardedWorkerHandlingConfig(
            self.writers.events
        )

        # Handle sharded push
        start_pushers = config.get("start_pushers", True)
        pusher_instances = config.get("pusher_instances")
        if pusher_instances is None:
            # Default to an empty list, which means "another, unknown, worker is
            # responsible for it".
            pusher_instances = []

            # If no pushers instances are set we check if `start_pushers` is
            # set, which means use master
            if start_pushers:
                pusher_instances = ["master"]

            if self.worker_app == "synapse.app.pusher":
                if start_pushers:
                    # If we're running pushers, and not using
                    # `pusher_instances`, then we should have explicitly set
                    # `start_pushers` to false.
                    raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)

                pusher_instances = [self.instance_name]

        self.start_pushers = self.instance_name in pusher_instances
        self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)

        # Whether this worker should run background tasks or not.
        #
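The pusher rules mirror the federation-sender ones. A sketch of the two supported main-config shapes (worker names invented for the example; the synapse.app.pusher worker-app special case is omitted):

# Old style: a single synapse.app.pusher worker, so master must opt out.
main_config_old = {"start_pushers": False}

# New style: name the pusher workers explicitly; each instance checks
# whether its own name is in the list.
main_config_new = {"pusher_instances": ["pusher1", "pusher2"]}

def instance_starts_pushers(instance_name: str, config: dict) -> bool:
    pusher_instances = config.get("pusher_instances")
    if pusher_instances is None:
        pusher_instances = ["master"] if config.get("start_pushers", True) else []
    return instance_name in pusher_instances

assert not instance_starts_pushers("master", main_config_new)
assert instance_starts_pushers("pusher2", main_config_new)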
@@ -42,7 +42,7 @@ def check(
    do_sig_check: bool = True,
    do_size_check: bool = True,
) -> None:
    """ Checks if this event is correctly authed.
    """Checks if this event is correctly authed.

    Args:
        room_version_obj: the version of the room
@@ -423,7 +423,9 @@ def _can_send_event(event: EventBase, auth_events: StateMap[EventBase]) -> bool:


def check_redaction(
    room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase],
    room_version_obj: RoomVersion,
    event: EventBase,
    auth_events: StateMap[EventBase],
) -> bool:
    """Check whether the event sender is allowed to redact the target event.
@@ -459,7 +461,9 @@ def check_redaction(


def _check_power_levels(
    room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase],
    room_version_obj: RoomVersion,
    event: EventBase,
    auth_events: StateMap[EventBase],
) -> None:
    user_list = event.content.get("users", {})
    # Validate users
@@ -98,7 +98,9 @@ class EventBuilder:
        return self._state_key is not None

    async def build(
        self, prev_event_ids: List[str], auth_event_ids: Optional[List[str]],
        self,
        prev_event_ids: List[str],
        auth_event_ids: Optional[List[str]],
    ) -> EventBase:
        """Transform into a fully signed and hashed event

@@ -341,8 +341,7 @@ def _encode_state_dict(state_dict):


def _decode_state_dict(input):
    """Decodes a state dict encoded using `_encode_state_dict` above
    """
    """Decodes a state dict encoded using `_encode_state_dict` above"""
    if input is None:
        return None

@@ -15,8 +15,11 @@
# limitations under the License.

import inspect
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import Collection
from synapse.util.async_helpers import maybe_awaitable
@@ -25,6 +28,8 @@ if TYPE_CHECKING:
    import synapse.events
    import synapse.server

logger = logging.getLogger(__name__)


class SpamChecker:
    def __init__(self, hs: "synapse.server.HomeServer"):
@@ -188,6 +193,7 @@ class SpamChecker:
        email_threepid: Optional[dict],
        username: Optional[str],
        request_info: Collection[Tuple[str, str]],
        auth_provider_id: Optional[str] = None,
    ) -> RegistrationBehaviour:
        """Checks if we should allow the given registration request.

@@ -196,6 +202,9 @@ class SpamChecker:
            username: The request user name, if any
            request_info: List of tuples of user agent and IP that
                were used during the registration process.
            auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml",
                "cas". If any. Note this does not include users registered
                via a password provider.

        Returns:
            Enum for how the request should be handled
@@ -206,11 +215,72 @@ class SpamChecker:
            # spam checker
            checker = getattr(spam_checker, "check_registration_for_spam", None)
            if checker:
                behaviour = await maybe_awaitable(
                    checker(email_threepid, username, request_info)
                )
                # Provide auth_provider_id if the function supports it
                checker_args = inspect.signature(checker)
                if len(checker_args.parameters) == 4:
                    d = checker(
                        email_threepid,
                        username,
                        request_info,
                        auth_provider_id,
                    )
                elif len(checker_args.parameters) == 3:
                    d = checker(email_threepid, username, request_info)
                else:
                    logger.error(
                        "Invalid signature for %s.check_registration_for_spam. Denying registration",
                        spam_checker.__module__,
                    )
                    return RegistrationBehaviour.DENY

                behaviour = await maybe_awaitable(d)
                assert isinstance(behaviour, RegistrationBehaviour)
                if behaviour != RegistrationBehaviour.ALLOW:
                    return behaviour

        return RegistrationBehaviour.ALLOW

    async def check_media_file_for_spam(
        self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
    ) -> bool:
        """Checks if a piece of newly uploaded media should be blocked.

        This will be called for local uploads, downloads of remote media, each
        thumbnail generated for those, and web pages/images used for URL
        previews.

        Note that care should be taken to not do blocking IO operations in the
        main thread. For example, to get the contents of a file a module
        should do::

            async def check_media_file_for_spam(
                self, file: ReadableFileWrapper, file_info: FileInfo
            ) -> bool:
                buffer = BytesIO()
                await file.write_chunks_to(buffer.write)

                if buffer.getvalue() == b"Hello World":
                    return True

                return False


        Args:
            file: An object that allows reading the contents of the media.
            file_info: Metadata about the file.

        Returns:
            True if the media should be blocked or False if it should be
            allowed.
        """

        for spam_checker in self.spam_checkers:
            # For backwards compatibility, only run if the method exists on the
            # spam checker
            checker = getattr(spam_checker, "check_media_file_for_spam", None)
            if checker:
                spam = await maybe_awaitable(checker(file_wrapper, file_info))
                if spam:
                    return True

        return False
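From a module author's perspective, the signature inspection above means a spam checker opts in to the new argument simply by accepting four parameters; three-parameter modules keep working unchanged. A sketch of an opted-in module (the class name and policy are invented for the example):

from typing import Collection, Optional, Tuple

from synapse.spam_checker_api import RegistrationBehaviour

class ExampleSpamChecker:
    async def check_registration_for_spam(
        self,
        email_threepid: Optional[dict],
        username: Optional[str],
        request_info: Collection[Tuple[str, str]],
        auth_provider_id: Optional[str] = None,
    ) -> RegistrationBehaviour:
        # Example policy only: allow anyone who arrived via SSO,
        # shadow-ban everyone else.
        if auth_provider_id in ("oidc", "saml", "cas"):
            return RegistrationBehaviour.ALLOW
        return RegistrationBehaviour.SHADOW_BAN

Note that inspect.signature() on the bound method excludes self, so this counts as the four-parameter case.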
@@ -40,7 +40,8 @@ class ThirdPartyEventRules:

        if module is not None:
            self.third_party_rules = module(
                config=config, module_api=hs.get_module_api(),
                config=config,
                module_api=hs.get_module_api(),
            )

    async def check_event_allowed(
@@ -34,7 +34,7 @@ SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")


def prune_event(event: EventBase) -> EventBase:
    """ Returns a pruned version of the given event, which removes all keys we
    """Returns a pruned version of the given event, which removes all keys we
    don't know about or think could potentially be dodgy.

    This is used when we "redact" an event. We want to remove all fields that
@@ -750,7 +750,11 @@ class FederationClient(FederationBase):
        return resp[1]

    async def send_invite(
        self, destination: str, room_id: str, event_id: str, pdu: EventBase,
        self,
        destination: str,
        room_id: str,
        event_id: str,
        pdu: EventBase,
    ) -> EventBase:
        room_version = await self.store.get_room_version(room_id)

@@ -22,6 +22,7 @@ from typing import (
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
@@ -34,7 +35,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
@@ -44,6 +45,7 @@ from synapse.api.errors import (
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
@@ -85,19 +87,19 @@ received_queries_counter = Counter(
)

pdu_process_time = Histogram(
    "synapse_federation_server_pdu_process_time", "Time taken to process an event",
    "synapse_federation_server_pdu_process_time",
    "Time taken to process an event",
)


last_pdu_age_metric = Gauge(
    "synapse_federation_last_received_pdu_age",
    "The age (in seconds) of the last PDU successfully received from the given domain",
last_pdu_ts_metric = Gauge(
    "synapse_federation_last_received_pdu_time",
    "The timestamp of the last PDU which was successfully received from the given domain",
    labelnames=("server_name",),
)


class FederationServer(FederationBase):
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.auth = hs.get_auth()
@@ -110,14 +112,15 @@ class FederationServer(FederationBase):
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        # origins that we are currently processing a transaction from.
        # a dict from origin to txn id.
        self._active_transactions = {}  # type: Dict[str, str]

        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs, "fed_txn_handler", timeout_ms=30000
            hs.get_clock(), "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)
@@ -127,10 +130,10 @@ class FederationServer(FederationBase):
        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
            hs, "state_resp", timeout_ms=30000
            hs.get_clock(), "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs, "state_ids_resp", timeout_ms=30000
            hs.get_clock(), "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self._federation_metrics_domains = (
@@ -167,6 +170,33 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Got transaction", transaction_id)

        # Reject malformed transactions early: reject if too many PDUs/EDUs
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):
            logger.info("Transaction PDU or EDU count too large. Returning 400")
            return 400, {}

        # we only process one transaction from each origin at a time. We need to do
        # this check here, rather than in _on_incoming_transaction_inner so that we
        # don't cache the rejection in _transaction_resp_cache (so that if the txn
        # arrives again later, we can process it).
        current_transaction = self._active_transactions.get(origin)
        if current_transaction and current_transaction != transaction_id:
            logger.warning(
                "Received another txn %s from %s while still processing %s",
                transaction_id,
                origin,
                current_transaction,
            )
            return 429, {
                "errcode": Codes.UNKNOWN,
                "error": "Too many concurrent transactions",
            }

        # CRITICAL SECTION: we must now not await until we populate _active_transactions
        # in _on_incoming_transaction_inner.

        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
        return await self._transaction_resp_cache.wrap(
@@ -180,31 +210,23 @@ class FederationServer(FederationBase):
    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        # Use a linearizer to ensure that transactions from a remote are
        # processed in order.
        with await self._transaction_linearizer.queue(origin):
            # We rate limit here *after* we've queued up the incoming requests,
            # so that we don't fill up the ratelimiter with blocked requests.
            #
            # This is important as the ratelimiter allows N concurrent requests
            # at a time, and only starts ratelimiting if there are more requests
            # than that being processed at a time. If we queued up requests in
            # the linearizer/response cache *after* the ratelimiting then those
            # queued up requests would count as part of the allowed limit of N
            # concurrent requests.
            with self._federation_ratelimiter.ratelimit(origin) as d:
                await d

                result = await self._handle_incoming_transaction(
                    origin, transaction, request_time
                )

        return result
        # CRITICAL SECTION: the first thing we must do (before awaiting) is
        # add an entry to _active_transactions.
        assert origin not in self._active_transactions
        self._active_transactions[origin] = transaction.transaction_id  # type: ignore

        try:
            result = await self._handle_incoming_transaction(
                origin, transaction, request_time
            )
            return result
        finally:
            del self._active_transactions[origin]

    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """ Process an incoming transaction and return the HTTP response
        """Process an incoming transaction and return the HTTP response

        Args:
            origin: the server making the request
@@ -225,19 +247,6 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore

        # Reject if PDU count > 50 or EDU count > 100
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):

            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            await self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return 400, response

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
@@ -333,48 +342,53 @@ class FederationServer(FederationBase):
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with pdu_process_time.time():
                    with nested_logging_context(event_id):
                        try:
                            await self._handle_received_pdu(origin, pdu)
                            pdu_results[event_id] = {}
                        except FederationError as e:
                            logger.warning("Error handling PDU %s: %s", event_id, e)
                            pdu_results[event_id] = {"error": str(e)}
                        except Exception as e:
                            f = failure.Failure()
                            pdu_results[event_id] = {"error": str(e)}
                            logger.error(
                                "Failed to handle PDU %s",
                                event_id,
                                exc_info=(f.type, f.value, f.getTracebackObject()),
                            )
            with nested_logging_context(room_id):
                logger.debug("Processing PDUs for %s", room_id)

                try:
                    await self.check_server_matches_acl(origin_host, room_id)
                except AuthError as e:
                    logger.warning(
                        "Ignoring PDUs for room %s from banned server", room_id
                    )
                    for pdu in pdus_by_room[room_id]:
                        event_id = pdu.event_id
                        pdu_results[event_id] = e.error_dict()
                    return

                for pdu in pdus_by_room[room_id]:
                    pdu_results[pdu.event_id] = await process_pdu(pdu)

        async def process_pdu(pdu: EventBase) -> JsonDict:
            event_id = pdu.event_id
            with pdu_process_time.time():
                with nested_logging_context(event_id):
                    try:
                        await self._handle_received_pdu(origin, pdu)
                        return {}
                    except FederationError as e:
                        logger.warning("Error handling PDU %s: %s", event_id, e)
                        return {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
                        )
                        return {"error": str(e)}

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if newest_pdu_ts and origin in self._federation_metrics_domains:
            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)
            last_pdu_ts_metric.labels(server_name=origin).set(newest_pdu_ts / 1000)

        return pdu_results

    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
        """Process the EDUs in a received transaction.
        """
        """Process the EDUs in a received transaction."""

        async def _process_edu(edu_dict):
            received_edus_counter.inc()
@@ -437,25 +451,32 @@ class FederationServer(FederationBase):
            raise AuthError(403, "Host not in room.")

        resp = await self._state_ids_resp_cache.wrap(
            (room_id, event_id), self._on_state_ids_request_compute, room_id, event_id,
            (room_id, event_id),
            self._on_state_ids_request_compute,
            room_id,
            event_id,
        )

        return 200, resp

    async def _on_state_ids_request_compute(self, room_id, event_id):
        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
        auth_chain_ids = await self.store.get_auth_chain_ids(room_id, state_ids)
        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}

    async def _on_context_state_request_compute(
        self, room_id: str, event_id: str
    ) -> Dict[str, list]:
        if event_id:
            pdus = await self.handler.get_state_for_pdu(room_id, event_id)
            pdus = await self.handler.get_state_for_pdu(
                room_id, event_id
            )  # type: Iterable[EventBase]
        else:
            pdus = (await self.state.get_current_state(room_id)).values()

        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
        auth_chain = await self.store.get_auth_chain(
            room_id, [pdu.event_id for pdu in pdus]
        )

        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
@@ -679,7 +700,7 @@ class FederationServer(FederationBase):
        )

    async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
        """ Process a PDU received in a federation /send/ transaction.
        """Process a PDU received in a federation /send/ transaction.

        If the event is invalid, then this method throws a FederationError.
        (The error will then be logged and sent back to the sender (which
@@ -859,13 +880,22 @@ class FederationHandlerRegistry:
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]
        self.query_handlers = (
            {}
        )  # type: Dict[str, Callable[[dict], Awaitable[JsonDict]]]

        # Map from type to instance names that we should route EDU handling to.
        # We randomly choose one instance from the list to route to for each new
        # EDU received.
        self._edu_type_to_instance = {}  # type: Dict[str, List[str]]

        # A rate limiter for incoming room key requests per origin.
        self._room_key_request_rate_limiter = Ratelimiter(
            clock=self.clock,
            rate_hz=self.config.rc_key_requests.per_second,
            burst_count=self.config.rc_key_requests.burst_count,
        )

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
@@ -886,7 +916,7 @@ class FederationHandlerRegistry:
        self.edu_handlers[edu_type] = handler

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], defer.Deferred]
        self, query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.
@@ -906,17 +936,23 @@ class FederationHandlerRegistry:
        self.query_handlers[query_type] = handler

    def register_instance_for_edu(self, edu_type: str, instance_name: str):
        """Register that the EDU handler is on a different instance than master.
        """
        """Register that the EDU handler is on a different instance than master."""
        self._edu_type_to_instance[edu_type] = [instance_name]

    def register_instances_for_edu(self, edu_type: str, instance_names: List[str]):
        """Register that the EDU handler is on multiple instances.
        """
        """Register that the EDU handler is on multiple instances."""
        self._edu_type_to_instance[edu_type] = instance_names

    async def on_edu(self, edu_type: str, origin: str, content: dict):
        if not self.config.use_presence and edu_type == "m.presence":
        if not self.config.use_presence and edu_type == EduTypes.Presence:
            return

        # If the incoming room key requests from a particular origin are over
        # the limit, drop them.
        if (
            edu_type == EduTypes.RoomKeyRequest
            and not self._room_key_request_rate_limiter.can_do_action(origin)
        ):
            return

        # Check if we have a handler on this instance
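This ties back to the rc_key_requests setting parsed earlier: with the defaults of per_second=20 and burst_count=100, a remote server can burst 100 m.room_key_request EDUs and is then throttled to 20 per second. Note that excess EDUs are silently dropped rather than failing the whole transaction; a sketch of that gating, mirroring the call in on_edu above:

async def on_edu_sketch(registry, edu_type: str, origin: str, content: dict):
    # Illustrative only; `registry` stands in for a FederationHandlerRegistry.
    if (
        edu_type == "m.room_key_request"
        and not registry._room_key_request_rate_limiter.can_do_action(origin)
    ):
        return  # dropped silently; the sender is not told
    # ... dispatch to the registered handler as usual ...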
@@ -953,7 +989,7 @@ class FederationHandlerRegistry:
            # Oh well, let's just log and move on.
            logger.warning("No handler registered for EDU type %s", edu_type)

    async def on_query(self, query_type: str, args: dict):
    async def on_query(self, query_type: str, args: dict) -> JsonDict:
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)
@@ -30,8 +30,7 @@ logger = logging.getLogger(__name__)


class TransactionActions:
    """ Defines persistence actions that relate to handling Transactions.
    """
    """Defines persistence actions that relate to handling Transactions."""

    def __init__(self, datastore):
        self.store = datastore
@@ -57,8 +56,7 @@ class TransactionActions:
    async def set_response(
        self, origin: str, transaction: Transaction, code: int, response: JsonDict
    ) -> None:
        """Persist how we responded to a transaction.
        """
        """Persist how we responded to a transaction."""
        transaction_id = transaction.transaction_id  # type: ignore
        if not transaction_id:
            raise RuntimeError("Cannot persist a transaction with no transaction_id")
@@ -468,8 +468,7 @@ class KeyedEduRow(


class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))):  # Edu
    """Streams EDUs that don't have keys. See KeyedEduRow
    """
    """Streams EDUs that don't have keys. See KeyedEduRow"""

    TypeId = "e"
@@ -519,7 +518,10 @@ def process_rows_for_federation(transaction_queue, rows):
    # them into the appropriate collection and then send them off.

    buff = ParsedFederationStreamData(
        presence=[], presence_destinations=[], keyed_edus={}, edus={},
        presence=[],
        presence_destinations=[],
        keyed_edus={},
        edus={},
    )

    # Parse the rows in the stream and add to the buffer
@@ -328,7 +328,9 @@ class FederationSender:
        # to allow us to perform catch-up later on if the remote is unreachable
        # for a while.
        await self.store.store_destination_rooms_entries(
            destinations, pdu.room_id, pdu.internal_metadata.stream_ordering,
            destinations,
            pdu.room_id,
            pdu.internal_metadata.stream_ordering,
        )

        for destination in destinations:
@@ -472,10 +474,10 @@ class FederationSender:
        self._processing_pending_presence = False

    def send_presence_to_destinations(
        self, states: List[UserPresenceState], destinations: List[str]
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """Send the given presence states to the given destinations.
            destinations (list[str])
            destinations (list[str])
        """

        if not states or not self.hs.config.use_presence:
@@ -616,8 +618,8 @@ class FederationSender:
|
||||
last_processed = None # type: Optional[str]
|
||||
|
||||
while True:
|
||||
destinations_to_wake = await self.store.get_catch_up_outstanding_destinations(
|
||||
last_processed
|
||||
destinations_to_wake = (
|
||||
await self.store.get_catch_up_outstanding_destinations(last_processed)
|
||||
)
|
||||
|
||||
if not destinations_to_wake:
|
||||
|
||||
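The wake-up loop above pages through outstanding destinations using the last processed destination as a continuation token. A minimal sketch of that keyset-pagination pattern — the store method and `wake` callback are stand-ins:

```python
from typing import List, Optional


async def wake_outstanding_destinations(store, wake) -> None:
    """Page through destinations needing catch-up, one batch at a time."""
    last_processed: Optional[str] = None

    while True:
        # The store returns destinations sorted after `last_processed`,
        # so each iteration resumes where the previous one stopped.
        batch: List[str] = await store.get_catch_up_outstanding_destinations(
            last_processed
        )
        if not batch:
            break

        for destination in batch:
            wake(destination)
        last_processed = batch[-1]
```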
@@ -17,6 +17,7 @@ import datetime
import logging
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, cast

import attr
from prometheus_client import Counter

from synapse.api.errors import (
@@ -85,13 +86,18 @@ class PerDestinationQueue:
            # processing. We have a guard in `attempt_new_transaction` that
            # ensure we don't start sending stuff.
            logger.error(
                "Create a per destination queue for %s on wrong worker", destination,
                "Create a per destination queue for %s on wrong worker",
                destination,
            )
            self._should_send_on_this_instance = False

        self._destination = destination
        self.transmission_loop_running = False

        # Flag to signal to any running transmission loop that there is new data
        # queued up to be sent.
        self._new_data_to_send = False

        # True whilst we are sending events that the remote homeserver missed
        # because it was unreachable. We start in this state so we can perform
        # catch-up at startup.
@@ -107,7 +113,7 @@ class PerDestinationQueue:
        # destination (we are the only updater so this is safe)
        self._last_successful_stream_ordering = None  # type: Optional[int]

        # a list of pending PDUs
        # a queue of pending PDUs
        self._pending_pdus = []  # type: List[EventBase]

        # XXX this is never actually used: see
@@ -207,6 +213,10 @@ class PerDestinationQueue:
        transaction in the background.
        """

        # Mark that we (may) have new things to send, so that any running
        # transmission loop will recheck whether there is stuff to send.
        self._new_data_to_send = True

        if self.transmission_loop_running:
            # XXX: this can get stuck on by a never-ending
            # request at which point pending_pdus just keeps growing.
@@ -249,125 +259,41 @@ class PerDestinationQueue:

            pending_pdus = []
            while True:
                # We have to keep 2 free slots for presence and rr_edus
                limit = MAX_EDUS_PER_TRANSACTION - 2
                self._new_data_to_send = False

                device_update_edus, dev_list_id = await self._get_device_update_edus(
                    limit
                )

                limit -= len(device_update_edus)

                (
                    to_device_edus,
                    device_stream_id,
                ) = await self._get_to_device_message_edus(limit)

                pending_edus = device_update_edus + to_device_edus

                # BEGIN CRITICAL SECTION
                #
                # In order to avoid a race condition, we need to make sure that
                # the following code (from popping the queues up to the point
                # where we decide if we actually have any pending messages) is
                # atomic - otherwise new PDUs or EDUs might arrive in the
                # meantime, but not get sent because we hold the
                # transmission_loop_running flag.

                pending_pdus = self._pending_pdus

                # We can only include at most 50 PDUs per transactions
                pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]

                pending_edus.extend(self._get_rr_edus(force_flush=False))
                pending_presence = self._pending_presence
                self._pending_presence = {}
                if pending_presence:
                    pending_edus.append(
                        Edu(
                            origin=self._server_name,
                            destination=self._destination,
                            edu_type="m.presence",
                            content={
                                "push": [
                                    format_user_presence_state(
                                        presence, self._clock.time_msec()
                                    )
                                    for presence in pending_presence.values()
                                ]
                            },
                        )
                    )

                pending_edus.extend(
                    self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
                )
                while (
                    len(pending_edus) < MAX_EDUS_PER_TRANSACTION
                    and self._pending_edus_keyed
                async with _TransactionQueueManager(self) as (
                    pending_pdus,
                    pending_edus,
                ):
                    _, val = self._pending_edus_keyed.popitem()
                    pending_edus.append(val)
                    if not pending_pdus and not pending_edus:
                        logger.debug("TX [%s] Nothing to send", self._destination)

                if pending_pdus:
                    logger.debug(
                        "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                        self._destination,
                        len(pending_pdus),
                        # If we've gotten told about new things to send during
                        # checking for things to send, we try looking again.
                        # Otherwise new PDUs or EDUs might arrive in the meantime,
                        # but not get sent because we hold the
                        # `transmission_loop_running` flag.
                        if self._new_data_to_send:
                            continue
                        else:
                            return

                    if pending_pdus:
                        logger.debug(
                            "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                            self._destination,
                            len(pending_pdus),
                        )

                    await self._transaction_manager.send_new_transaction(
                        self._destination, pending_pdus, pending_edus
                    )

                if not pending_pdus and not pending_edus:
                    logger.debug("TX [%s] Nothing to send", self._destination)
                    self._last_device_stream_id = device_stream_id
                    return

                # if we've decided to send a transaction anyway, and we have room, we
                # may as well send any pending RRs
                if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
                    pending_edus.extend(self._get_rr_edus(force_flush=True))

                # END CRITICAL SECTION

                success = await self._transaction_manager.send_new_transaction(
                    self._destination, pending_pdus, pending_edus
                )
                if success:
                    sent_transactions_counter.inc()
                    sent_edus_counter.inc(len(pending_edus))
                    for edu in pending_edus:
                        sent_edus_by_type.labels(edu.edu_type).inc()
                    # Remove the acknowledged device messages from the database
                    # Only bother if we actually sent some device messages
                    if to_device_edus:
                        await self._store.delete_device_msgs_for_remote(
                            self._destination, device_stream_id
                        )

                    # also mark the device updates as sent
                    if device_update_edus:
                        logger.info(
                            "Marking as sent %r %r", self._destination, dev_list_id
                        )
                        await self._store.mark_as_sent_devices_by_remote(
                            self._destination, dev_list_id
                        )

                    self._last_device_stream_id = device_stream_id
                    self._last_device_list_stream_id = dev_list_id

                    if pending_pdus:
                        # we sent some PDUs and it was successful, so update our
                        # last_successful_stream_ordering in the destinations table.
                        final_pdu = pending_pdus[-1]
                        last_successful_stream_ordering = (
                            final_pdu.internal_metadata.stream_ordering
                        )
                        assert last_successful_stream_ordering
                        await self._store.set_destination_last_successful_stream_ordering(
                            self._destination, last_successful_stream_ordering
                        )
                else:
                    break
        except NotRetryingDestination as e:
            logger.debug(
                "TX [%s] not ready for retry yet (next retry at %s) - "
@@ -400,7 +326,7 @@ class PerDestinationQueue:
            self._pending_presence = {}
            self._pending_rrs = {}

            self._start_catching_up()
            self._start_catching_up()
        except FederationDeniedError as e:
            logger.info(e)
        except HttpResponseException as e:
@@ -411,7 +337,6 @@ class PerDestinationQueue:
                e,
            )

            self._start_catching_up()
        except RequestSendFailed as e:
            logger.warning(
                "TX [%s] Failed to send transaction: %s", self._destination, e
@@ -421,16 +346,12 @@ class PerDestinationQueue:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )

                self._start_catching_up()
        except Exception:
            logger.exception("TX [%s] Failed to send transaction", self._destination)
            for p in pending_pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )

            self._start_catching_up()
        finally:
            # We want to be *very* sure we clear this after we stop processing
            self.transmission_loop_running = False
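The new loop relies on a "dirty flag plus single-consumer loop" pattern: `notify` sets `_new_data_to_send`, the loop clears it before looking for work, and loops again if it was re-set meanwhile, so no wake-up is lost. A stripped-down sketch of that pattern under asyncio (everything here is illustrative and assumes a running event loop):

```python
import asyncio


class NotifyLoop:
    """One consumer loop guarded by a `running` flag plus a dirty bit."""

    def __init__(self) -> None:
        self.running = False
        self.new_data = False
        self.queue: list = []

    def notify(self, item) -> None:
        self.queue.append(item)
        self.new_data = True
        if not self.running:
            self.running = True
            asyncio.ensure_future(self._loop())  # needs a running event loop

    async def _loop(self) -> None:
        try:
            while True:
                # Clear the dirty bit *before* checking for work.
                self.new_data = False
                batch, self.queue = self.queue[:50], self.queue[50:]
                if not batch:
                    if self.new_data:
                        continue  # raced with notify(); look again
                    return
                await self._send(batch)
        finally:
            self.running = False

    async def _send(self, batch) -> None:
        await asyncio.sleep(0)  # stand-in for the real transmission
```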
@@ -440,8 +361,10 @@ class PerDestinationQueue:

        if first_catch_up_check:
            # first catchup so get last_successful_stream_ordering from database
            self._last_successful_stream_ordering = await self._store.get_destination_last_successful_stream_ordering(
                self._destination
            self._last_successful_stream_ordering = (
                await self._store.get_destination_last_successful_stream_ordering(
                    self._destination
                )
            )

            if self._last_successful_stream_ordering is None:
@@ -457,7 +380,8 @@ class PerDestinationQueue:
            # get at most 50 catchup room/PDUs
            while True:
                event_ids = await self._store.get_catch_up_room_event_ids(
                    self._destination, self._last_successful_stream_ordering,
                    self._destination,
                    self._last_successful_stream_ordering,
                )

                if not event_ids:
@@ -495,13 +419,10 @@ class PerDestinationQueue:
            rooms = [p.room_id for p in catchup_pdus]
            logger.info("Catching up rooms to %s: %r", self._destination, rooms)

            success = await self._transaction_manager.send_new_transaction(
            await self._transaction_manager.send_new_transaction(
                self._destination, catchup_pdus, []
            )

            if not success:
                return

            sent_transactions_counter.inc()
            final_pdu = catchup_pdus[-1]
            self._last_successful_stream_ordering = cast(
@@ -580,3 +501,135 @@ class PerDestinationQueue:
        """
        self._catching_up = True
        self._pending_pdus = []


@attr.s(slots=True)
class _TransactionQueueManager:
    """A helper async context manager for pulling stuff off the queues and
    tracking what was last successfully sent, etc.
    """

    queue = attr.ib(type=PerDestinationQueue)

    _device_stream_id = attr.ib(type=Optional[int], default=None)
    _device_list_id = attr.ib(type=Optional[int], default=None)
    _last_stream_ordering = attr.ib(type=Optional[int], default=None)
    _pdus = attr.ib(type=List[EventBase], factory=list)

    async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
        # First we calculate the EDUs we want to send, if any.

        # We start by fetching device related EDUs, i.e device updates and to
        # device messages. We have to keep 2 free slots for presence and rr_edus.
        limit = MAX_EDUS_PER_TRANSACTION - 2

        device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
            limit
        )

        if device_update_edus:
            self._device_list_id = dev_list_id
        else:
            self.queue._last_device_list_stream_id = dev_list_id

        limit -= len(device_update_edus)

        (
            to_device_edus,
            device_stream_id,
        ) = await self.queue._get_to_device_message_edus(limit)

        if to_device_edus:
            self._device_stream_id = device_stream_id
        else:
            self.queue._last_device_stream_id = device_stream_id

        pending_edus = device_update_edus + to_device_edus

        # Now add the read receipt EDU.
        pending_edus.extend(self.queue._get_rr_edus(force_flush=False))

        # And presence EDU.
        if self.queue._pending_presence:
            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type="m.presence",
                    content={
                        "push": [
                            format_user_presence_state(
                                presence, self.queue._clock.time_msec()
                            )
                            for presence in self.queue._pending_presence.values()
                        ]
                    },
                )
            )
            self.queue._pending_presence = {}

        # Finally add any other types of EDUs if there is room.
        pending_edus.extend(
            self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
        )
        while (
            len(pending_edus) < MAX_EDUS_PER_TRANSACTION
            and self.queue._pending_edus_keyed
        ):
            _, val = self.queue._pending_edus_keyed.popitem()
            pending_edus.append(val)

        # Now we look for any PDUs to send, by getting up to 50 PDUs from the
        # queue
        self._pdus = self.queue._pending_pdus[:50]

        if not self._pdus and not pending_edus:
            return [], []

        # if we've decided to send a transaction anyway, and we have room, we
        # may as well send any pending RRs
        if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
            pending_edus.extend(self.queue._get_rr_edus(force_flush=True))

        if self._pdus:
            self._last_stream_ordering = self._pdus[
                -1
            ].internal_metadata.stream_ordering
            assert self._last_stream_ordering

        return self._pdus, pending_edus

    async def __aexit__(self, exc_type, exc, tb):
        if exc_type is not None:
            # Failed to send transaction, so we bail out.
            return

        # Successfully sent transactions, so we remove pending PDUs from the queue
        if self._pdus:
            self.queue._pending_pdus = self.queue._pending_pdus[len(self._pdus) :]

        # Succeeded to send the transaction so we record where we have sent up
        # to in the various streams

        if self._device_stream_id:
            await self.queue._store.delete_device_msgs_for_remote(
                self.queue._destination, self._device_stream_id
            )
            self.queue._last_device_stream_id = self._device_stream_id

        # also mark the device updates as sent
        if self._device_list_id:
            logger.info(
                "Marking as sent %r %r", self.queue._destination, self._device_list_id
            )
            await self.queue._store.mark_as_sent_devices_by_remote(
                self.queue._destination, self._device_list_id
            )
            self.queue._last_device_list_stream_id = self._device_list_id

        if self._last_stream_ordering:
            # we sent some PDUs and it was successful, so update our
            # last_successful_stream_ordering in the destinations table.
            await self.queue._store.set_destination_last_successful_stream_ordering(
                self.queue._destination, self._last_stream_ordering
            )
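The shape of `_TransactionQueueManager` is the interesting part: work is handed out in `__aenter__`, but only committed (popped from the queue, stream positions persisted) in `__aexit__`, and only when no exception reached it. A stripped-down illustration of that commit-on-clean-exit pattern — the names here are illustrative, not Synapse's:

```python
from types import TracebackType
from typing import List, Optional, Type


class WorkBatch:
    def __init__(self, queue: List[str]) -> None:
        self._queue = queue
        self._taken: List[str] = []

    async def __aenter__(self) -> List[str]:
        # Hand out up to 50 items, but leave them on the queue for now.
        self._taken = self._queue[:50]
        return self._taken

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        if exc_type is not None:
            # The send failed: keep the items queued for the next attempt.
            return
        # The send succeeded: now it is safe to drop them from the queue.
        del self._queue[: len(self._taken)]


async def drain(queue: List[str], send) -> None:
    while queue:
        async with WorkBatch(queue) as batch:
            await send(batch)  # an exception here leaves the queue intact
```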
@@ -36,9 +36,9 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

last_pdu_age_metric = Gauge(
    "synapse_federation_last_sent_pdu_age",
    "The age (in seconds) of the last PDU successfully sent to the given domain",
last_pdu_ts_metric = Gauge(
    "synapse_federation_last_sent_pdu_time",
    "The timestamp of the last PDU which was successfully sent to the given domain",
    labelnames=("server_name",),
)
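Switching the gauge from an age to a timestamp is a common Prometheus idiom: a timestamp is set once and stays correct, while an age silently goes stale unless re-exported continuously, and the age can always be derived at query time. A minimal sketch with `prometheus_client` (the metric name here is an example, not Synapse's):

```python
import time

from prometheus_client import Gauge

# Export the moment of the last successful send, not how long ago it was.
last_sent_ts = Gauge(
    "example_last_sent_pdu_time",
    "Unix timestamp (seconds) of the last PDU successfully sent to the domain",
    labelnames=("server_name",),
)


def record_send(server_name: str, origin_server_ts_ms: int) -> None:
    last_sent_ts.labels(server_name=server_name).set(origin_server_ts_ms / 1000)


record_send("example.org", int(time.time() * 1000))
# In PromQL, the age is then: time() - example_last_sent_pdu_time
```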
@@ -65,16 +65,16 @@ class TransactionManager:

    @measure_func("_send_new_transaction")
    async def send_new_transaction(
        self, destination: str, pdus: List[EventBase], edus: List[Edu],
    ) -> bool:
        self,
        destination: str,
        pdus: List[EventBase],
        edus: List[Edu],
    ) -> None:
        """
        Args:
            destination: The destination to send to (e.g. 'example.org')
            pdus: In-order list of PDUs to send
            edus: List of EDUs to send

        Returns:
            True iff the transaction was successful
        """

        # Make a transaction-sending opentracing span. This span follows on from
@@ -93,8 +93,6 @@ class TransactionManager:
            edu.strip_context()

        with start_active_span_follows_from("send_transaction", span_contexts):
            success = True

            logger.debug("TX [%s] _attempt_new_transaction", destination)

            txn_id = str(self._next_txn_id)
@@ -149,45 +147,29 @@ class TransactionManager:
                response = await self._transport_layer.send_transaction(
                    transaction, json_data_cb
                )
                code = 200
            except HttpResponseException as e:
                code = e.code
                response = e.response

                if e.code in (401, 404, 429) or 500 <= e.code:
                    logger.info(
                        "TX [%s] {%s} got %d response", destination, txn_id, code
                    )
                    raise e
                set_tag(tags.ERROR, True)

            logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
                logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
                raise

            if code == 200:
                for e_id, r in response.get("pdus", {}).items():
                    if "error" in r:
                        logger.warning(
                            "TX [%s] {%s} Remote returned error for %s: %s",
                            destination,
                            txn_id,
                            e_id,
                            r,
                        )
            else:
                for p in pdus:
            logger.info("TX [%s] {%s} got 200 response", destination, txn_id)

            for e_id, r in response.get("pdus", {}).items():
                if "error" in r:
                    logger.warning(
                        "TX [%s] {%s} Failed to send event %s",
                        "TX [%s] {%s} Remote returned error for %s: %s",
                        destination,
                        txn_id,
                        p.event_id,
                        e_id,
                        r,
                    )
                    success = False

        if success and pdus and destination in self._federation_metrics_domains:
        if pdus and destination in self._federation_metrics_domains:
            last_pdu = pdus[-1]
            last_pdu_age = self.clock.time_msec() - last_pdu.origin_server_ts
            last_pdu_age_metric.labels(server_name=destination).set(
                last_pdu_age / 1000
            last_pdu_ts_metric.labels(server_name=destination).set(
                last_pdu.origin_server_ts / 1000
            )

        set_tag(tags.ERROR, not success)
        return success

@@ -39,7 +39,7 @@ class TransportLayerClient:

    @log_function
    def get_room_state_ids(self, destination, room_id, event_id):
        """ Requests all state for a given room from the given server at the
        """Requests all state for a given room from the given server at the
        given event. Returns the state's event_id's

        Args:
@@ -63,7 +63,7 @@ class TransportLayerClient:

    @log_function
    def get_event(self, destination, event_id, timeout=None):
        """ Requests the pdu with give id and origin from the given server.
        """Requests the pdu with give id and origin from the given server.

        Args:
            destination (str): The host name of the remote homeserver we want
@@ -84,7 +84,7 @@ class TransportLayerClient:

    @log_function
    def backfill(self, destination, room_id, event_tuples, limit):
        """ Requests `limit` previous PDUs in a given context before list of
        """Requests `limit` previous PDUs in a given context before list of
        PDUs.

        Args:
@@ -118,7 +118,7 @@ class TransportLayerClient:

    @log_function
    async def send_transaction(self, transaction, json_data_callback=None):
        """ Sends the given Transaction to its destination
        """Sends the given Transaction to its destination

        Args:
            transaction (Transaction)
@@ -551,8 +551,7 @@ class TransportLayerClient:

    @log_function
    def get_group_profile(self, destination, group_id, requester_user_id):
        """Get a group profile
        """
        """Get a group profile"""
        path = _create_v1_path("/groups/%s/profile", group_id)

        return self.client.get_json(
@@ -584,8 +583,7 @@ class TransportLayerClient:

    @log_function
    def get_group_summary(self, destination, group_id, requester_user_id):
        """Get a group summary
        """
        """Get a group summary"""
        path = _create_v1_path("/groups/%s/summary", group_id)

        return self.client.get_json(
@@ -597,8 +595,7 @@ class TransportLayerClient:

    @log_function
    def get_rooms_in_group(self, destination, group_id, requester_user_id):
        """Get all rooms in a group
        """
        """Get all rooms in a group"""
        path = _create_v1_path("/groups/%s/rooms", group_id)

        return self.client.get_json(
@@ -611,8 +608,7 @@ class TransportLayerClient:
    def add_room_to_group(
        self, destination, group_id, requester_user_id, room_id, content
    ):
        """Add a room to a group
        """
        """Add a room to a group"""
        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)

        return self.client.post_json(
@@ -626,8 +622,7 @@ class TransportLayerClient:
    def update_room_in_group(
        self, destination, group_id, requester_user_id, room_id, config_key, content
    ):
        """Update room in group
        """
        """Update room in group"""
        path = _create_v1_path(
            "/groups/%s/room/%s/config/%s", group_id, room_id, config_key
        )
@@ -641,8 +636,7 @@ class TransportLayerClient:
        )

    def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
        """Remove a room from a group
        """
        """Remove a room from a group"""
        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)

        return self.client.delete_json(
@@ -654,8 +648,7 @@ class TransportLayerClient:

    @log_function
    def get_users_in_group(self, destination, group_id, requester_user_id):
        """Get users in a group
        """
        """Get users in a group"""
        path = _create_v1_path("/groups/%s/users", group_id)

        return self.client.get_json(
@@ -667,8 +660,7 @@ class TransportLayerClient:

    @log_function
    def get_invited_users_in_group(self, destination, group_id, requester_user_id):
        """Get users that have been invited to a group
        """
        """Get users that have been invited to a group"""
        path = _create_v1_path("/groups/%s/invited_users", group_id)

        return self.client.get_json(
@@ -680,8 +672,7 @@ class TransportLayerClient:

    @log_function
    def accept_group_invite(self, destination, group_id, user_id, content):
        """Accept a group invite
        """
        """Accept a group invite"""
        path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id)

        return self.client.post_json(
@@ -690,8 +681,7 @@ class TransportLayerClient:

    @log_function
    def join_group(self, destination, group_id, user_id, content):
        """Attempts to join a group
        """
        """Attempts to join a group"""
        path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)

        return self.client.post_json(
@@ -702,8 +692,7 @@ class TransportLayerClient:
    def invite_to_group(
        self, destination, group_id, user_id, requester_user_id, content
    ):
        """Invite a user to a group
        """
        """Invite a user to a group"""
        path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)

        return self.client.post_json(
@@ -730,8 +719,7 @@ class TransportLayerClient:
    def remove_user_from_group(
        self, destination, group_id, requester_user_id, user_id, content
    ):
        """Remove a user from a group
        """
        """Remove a user from a group"""
        path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)

        return self.client.post_json(
@@ -772,8 +760,7 @@ class TransportLayerClient:
    def update_group_summary_room(
        self, destination, group_id, user_id, room_id, category_id, content
    ):
        """Update a room entry in a group summary
        """
        """Update a room entry in a group summary"""
        if category_id:
            path = _create_v1_path(
                "/groups/%s/summary/categories/%s/rooms/%s",
@@ -796,8 +783,7 @@ class TransportLayerClient:
    def delete_group_summary_room(
        self, destination, group_id, user_id, room_id, category_id
    ):
        """Delete a room entry in a group summary
        """
        """Delete a room entry in a group summary"""
        if category_id:
            path = _create_v1_path(
                "/groups/%s/summary/categories/%s/rooms/%s",
@@ -817,8 +803,7 @@ class TransportLayerClient:

    @log_function
    def get_group_categories(self, destination, group_id, requester_user_id):
        """Get all categories in a group
        """
        """Get all categories in a group"""
        path = _create_v1_path("/groups/%s/categories", group_id)

        return self.client.get_json(
@@ -830,8 +815,7 @@ class TransportLayerClient:

    @log_function
    def get_group_category(self, destination, group_id, requester_user_id, category_id):
        """Get category info in a group
        """
        """Get category info in a group"""
        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)

        return self.client.get_json(
@@ -845,8 +829,7 @@ class TransportLayerClient:
    def update_group_category(
        self, destination, group_id, requester_user_id, category_id, content
    ):
        """Update a category in a group
        """
        """Update a category in a group"""
        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)

        return self.client.post_json(
@@ -861,8 +844,7 @@ class TransportLayerClient:
    def delete_group_category(
        self, destination, group_id, requester_user_id, category_id
    ):
        """Delete a category in a group
        """
        """Delete a category in a group"""
        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)

        return self.client.delete_json(
@@ -874,8 +856,7 @@ class TransportLayerClient:

    @log_function
    def get_group_roles(self, destination, group_id, requester_user_id):
        """Get all roles in a group
        """
        """Get all roles in a group"""
        path = _create_v1_path("/groups/%s/roles", group_id)

        return self.client.get_json(
@@ -887,8 +868,7 @@ class TransportLayerClient:

    @log_function
    def get_group_role(self, destination, group_id, requester_user_id, role_id):
        """Get a roles info
        """
        """Get a roles info"""
        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)

        return self.client.get_json(
@@ -902,8 +882,7 @@ class TransportLayerClient:
    def update_group_role(
        self, destination, group_id, requester_user_id, role_id, content
    ):
        """Update a role in a group
        """
        """Update a role in a group"""
        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)

        return self.client.post_json(
@@ -916,8 +895,7 @@ class TransportLayerClient:

    @log_function
    def delete_group_role(self, destination, group_id, requester_user_id, role_id):
        """Delete a role in a group
        """
        """Delete a role in a group"""
        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)

        return self.client.delete_json(
@@ -931,8 +909,7 @@ class TransportLayerClient:
    def update_group_summary_user(
        self, destination, group_id, requester_user_id, user_id, role_id, content
    ):
        """Update a users entry in a group
        """
        """Update a users entry in a group"""
        if role_id:
            path = _create_v1_path(
                "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
@@ -950,8 +927,7 @@ class TransportLayerClient:

    @log_function
    def set_group_join_policy(self, destination, group_id, requester_user_id, content):
        """Sets the join policy for a group
        """
        """Sets the join policy for a group"""
        path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id)

        return self.client.put_json(
@@ -966,8 +942,7 @@ class TransportLayerClient:
    def delete_group_summary_user(
        self, destination, group_id, requester_user_id, user_id, role_id
    ):
        """Delete a users entry in a group
        """
        """Delete a users entry in a group"""
        if role_id:
            path = _create_v1_path(
                "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
@@ -983,8 +958,7 @@ class TransportLayerClient:
        )

    def bulk_get_publicised_groups(self, destination, user_ids):
        """Get the groups a list of users are publicising
        """
        """Get the groups a list of users are publicising"""

        path = _create_v1_path("/get_groups_publicised")


@@ -21,6 +21,7 @@ import re
from typing import Optional, Tuple, Type

import synapse
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
@@ -364,7 +365,10 @@ class BaseFederationServlet:
                continue

            server.register_paths(
                method, (pattern,), self._wrap(code), self.__class__.__name__,
                method,
                (pattern,),
                self._wrap(code),
                self.__class__.__name__,
            )


@@ -381,7 +385,7 @@ class FederationSendServlet(BaseFederationServlet):

    # This is when someone is trying to send us a bunch of data.
    async def on_PUT(self, origin, content, query, transaction_id):
        """ Called on PUT /send/<transaction_id>/
        """Called on PUT /send/<transaction_id>/

        Args:
            request (twisted.web.http.Request): The HTTP request.
@@ -480,10 +484,9 @@ class FederationQueryServlet(BaseFederationServlet):

    # This is when we receive a server-server Query
    async def on_GET(self, origin, content, query, query_type):
        return await self.handler.on_query_request(
            query_type,
            {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()},
        )
        args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
        args["origin"] = origin
        return await self.handler.on_query_request(query_type, args)

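The servlet change above folds the request's authenticated `origin` into the query args before dispatching. A small sketch of that decode-and-augment step (the standalone helper is illustrative):

```python
from typing import Dict, List


def build_query_args(
    origin: str, raw_query: Dict[bytes, List[bytes]]
) -> Dict[str, str]:
    # Twisted hands query params over as bytes; decode the first value of
    # each key, then record which server is asking.
    args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in raw_query.items()}
    args["origin"] = origin
    return args


assert build_query_args("example.org", {b"field": [b"displayname"]}) == {
    "field": "displayname",
    "origin": "example.org",
}
```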
class FederationMakeJoinServlet(BaseFederationServlet):
@@ -855,8 +858,7 @@ class FederationVersionServlet(BaseFederationServlet):


class FederationGroupsProfileServlet(BaseFederationServlet):
    """Get/set the basic profile of a group on behalf of a user
    """
    """Get/set the basic profile of a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/profile"

@@ -895,8 +897,7 @@ class FederationGroupsSummaryServlet(BaseFederationServlet):


class FederationGroupsRoomsServlet(BaseFederationServlet):
    """Get the rooms in a group on behalf of a user
    """
    """Get the rooms in a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/rooms"

@@ -911,8 +912,7 @@ class FederationGroupsRoomsServlet(BaseFederationServlet):


class FederationGroupsAddRoomsServlet(BaseFederationServlet):
    """Add/remove room from group
    """
    """Add/remove room from group"""

    PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"

@@ -940,8 +940,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet):


class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
    """Update room config in group
    """
    """Update room config in group"""

    PATH = (
        "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
@@ -961,8 +960,7 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):


class FederationGroupsUsersServlet(BaseFederationServlet):
    """Get the users in a group on behalf of a user
    """
    """Get the users in a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/users"

@@ -977,8 +975,7 @@ class FederationGroupsUsersServlet(BaseFederationServlet):


class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
    """Get the users that have been invited to a group
    """
    """Get the users that have been invited to a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/invited_users"

@@ -995,8 +992,7 @@ class FederationGroupsInvitedUsersServlet(BaseFederationServlet):


class FederationGroupsInviteServlet(BaseFederationServlet):
    """Ask a group server to invite someone to the group
    """
    """Ask a group server to invite someone to the group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

@@ -1013,8 +1009,7 @@ class FederationGroupsInviteServlet(BaseFederationServlet):


class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
    """Accept an invitation from the group server
    """
    """Accept an invitation from the group server"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"

@@ -1028,8 +1023,7 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet):


class FederationGroupsJoinServlet(BaseFederationServlet):
    """Attempt to join a group
    """
    """Attempt to join a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"

@@ -1043,8 +1037,7 @@ class FederationGroupsJoinServlet(BaseFederationServlet):


class FederationGroupsRemoveUserServlet(BaseFederationServlet):
    """Leave or kick a user from the group
    """
    """Leave or kick a user from the group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

@@ -1061,8 +1054,7 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet):


class FederationGroupsLocalInviteServlet(BaseFederationServlet):
    """A group server has invited a local user
    """
    """A group server has invited a local user"""

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

@@ -1076,8 +1068,7 @@ class FederationGroupsLocalInviteServlet(BaseFederationServlet):


class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
    """A group server has removed a local user
    """
    """A group server has removed a local user"""

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

@@ -1093,8 +1084,7 @@ class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):


class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
    """A group or user's server renews their attestation
    """
    """A group or user's server renews their attestation"""

    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"

@@ -1128,7 +1118,17 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")
            raise SynapseError(
                400, "category_id cannot be empty string", Codes.INVALID_PARAM
            )

        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
            raise SynapseError(
                400,
                "category_id may not be longer than %s characters"
                % (MAX_GROUP_CATEGORYID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.update_group_summary_room(
            group_id,
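The two checks above (reject the empty string, cap the length) recur for both category and role IDs across these servlets. A compact sketch of the shared validation, assuming Synapse's `SynapseError` and `Codes` — the helper itself is illustrative, not part of the diff:

```python
from synapse.api.errors import Codes, SynapseError


def check_id_param(name: str, value: str, max_length: int) -> None:
    """Reject empty or over-long identifiers with M_INVALID_PARAM."""
    if value == "":
        raise SynapseError(400, f"{name} cannot be empty string", Codes.INVALID_PARAM)
    if len(value) > max_length:
        raise SynapseError(
            400,
            f"{name} may not be longer than {max_length} characters",
            Codes.INVALID_PARAM,
        )
```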
@@ -1156,8 +1156,7 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):


class FederationGroupsCategoriesServlet(BaseFederationServlet):
    """Get all categories for a group
    """
    """Get all categories for a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/?"

@@ -1172,8 +1171,7 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet):


class FederationGroupsCategoryServlet(BaseFederationServlet):
    """Add/remove/get a category in a group
    """
    """Add/remove/get a category in a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"

@@ -1196,6 +1194,14 @@ class FederationGroupsCategoryServlet(BaseFederationServlet):
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
            raise SynapseError(
                400,
                "category_id may not be longer than %s characters"
                % (MAX_GROUP_CATEGORYID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.upsert_group_category(
            group_id, requester_user_id, category_id, content
        )
@@ -1218,8 +1224,7 @@ class FederationGroupsCategoryServlet(BaseFederationServlet):


class FederationGroupsRolesServlet(BaseFederationServlet):
    """Get roles in a group
    """
    """Get roles in a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/roles/?"

@@ -1234,8 +1239,7 @@ class FederationGroupsRolesServlet(BaseFederationServlet):


class FederationGroupsRoleServlet(BaseFederationServlet):
    """Add/remove/get a role in a group
    """
    """Add/remove/get a role in a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"

@@ -1254,7 +1258,17 @@ class FederationGroupsRoleServlet(BaseFederationServlet):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")
            raise SynapseError(
                400, "role_id cannot be empty string", Codes.INVALID_PARAM
            )

        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
            raise SynapseError(
                400,
                "role_id may not be longer than %s characters"
                % (MAX_GROUP_ROLEID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.update_group_role(
            group_id, requester_user_id, role_id, content
@@ -1299,6 +1313,14 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
            raise SynapseError(
                400,
                "role_id may not be longer than %s characters"
                % (MAX_GROUP_ROLEID_LENGTH,),
                Codes.INVALID_PARAM,
            )

        resp = await self.handler.update_group_summary_user(
            group_id,
            requester_user_id,
@@ -1325,8 +1347,7 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet):


class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
    """Get roles in a group
    """
    """Get roles in a group"""

    PATH = "/get_groups_publicised"

@@ -1339,8 +1360,7 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):


class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
    """Sets whether a group is joinable without an invite or knock
    """
    """Sets whether a group is joinable without an invite or knock"""

    PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"


@@ -29,7 +29,7 @@ logger = logging.getLogger(__name__)

@attr.s(slots=True)
class Edu(JsonEncodedObject):
    """ An Edu represents a piece of data sent from one homeserver to another.
    """An Edu represents a piece of data sent from one homeserver to another.

    In comparison to Pdus, Edus are not persisted for a long time on disk, are
    not meaningful beyond a given pair of homeservers, and don't have an
@@ -63,7 +63,7 @@ class Edu(JsonEncodedObject):


class Transaction(JsonEncodedObject):
    """ A transaction is a list of Pdus and Edus to be sent to a remote home
    """A transaction is a list of Pdus and Edus to be sent to a remote home
    server with some extra metadata.

    Example transaction::
@@ -99,7 +99,7 @@ class Transaction(JsonEncodedObject):
    ]

    def __init__(self, transaction_id=None, pdus=[], **kwargs):
        """ If we include a list of pdus then we decode then as PDU's
        """If we include a list of pdus then we decode then as PDU's
        automatically.
        """

@@ -111,7 +111,7 @@ class Transaction(JsonEncodedObject):

    @staticmethod
    def create_new(pdus, **kwargs):
        """ Used to create a new transaction. Will auto fill out
        """Used to create a new transaction. Will auto fill out
        transaction_id and origin_server_ts keys.
        """
        if "origin_server_ts" not in kwargs:

@@ -37,13 +37,16 @@ An attestation is a signed blob of json that looks like:

import logging
import random
from typing import Tuple
from typing import TYPE_CHECKING, Optional, Tuple

from signedjson.sign import sign_json

from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.types import JsonDict, get_domain_from_id

if TYPE_CHECKING:
    from synapse.app.homeserver import HomeServer

logger = logging.getLogger(__name__)

@@ -61,18 +64,21 @@ UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000


class GroupAttestationSigning:
    """Creates and verifies group attestations.
    """
    """Creates and verifies group attestations."""

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.keyring = hs.get_keyring()
        self.clock = hs.get_clock()
        self.server_name = hs.hostname
        self.signing_key = hs.signing_key

    async def verify_attestation(
        self, attestation, group_id, user_id, server_name=None
    ):
        self,
        attestation: JsonDict,
        group_id: str,
        user_id: str,
        server_name: Optional[str] = None,
    ) -> None:
        """Verifies that the given attestation matches the given parameters.

        An optional server_name can be supplied to explicitly set which server's
@@ -101,16 +107,18 @@ class GroupAttestationSigning:
        if valid_until_ms < now:
            raise SynapseError(400, "Attestation expired")

        assert server_name is not None
        await self.keyring.verify_json_for_server(
            server_name, attestation, now, "Group attestation"
        )

    def create_attestation(self, group_id, user_id):
    def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
        """Create an attestation for the group_id and user_id with default
        validity length.
        """
        validity_period = DEFAULT_ATTESTATION_LENGTH_MS
        validity_period *= random.uniform(*DEFAULT_ATTESTATION_JITTER)
        validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform(
            *DEFAULT_ATTESTATION_JITTER
        )
        valid_until_ms = int(self.clock.time_msec() + validity_period)

        return sign_json(
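A self-contained sketch of the jittered-expiry computation above: randomising the validity period spreads renewals out so every attestation does not come due in one burst. The constants here are illustrative stand-ins for the module's real values:

```python
import random
import time

DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000  # illustrative
DEFAULT_ATTESTATION_JITTER = (0.9, 1.3)  # illustrative multiplier range


def jittered_valid_until_ms(now_ms: int) -> int:
    # A random multiplier on the base length desynchronises renewal times.
    validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform(
        *DEFAULT_ATTESTATION_JITTER
    )
    return int(now_ms + validity_period)


print(jittered_valid_until_ms(int(time.time() * 1000)))
```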
@@ -125,10 +133,9 @@ class GroupAttestationSigning:


class GroupAttestionRenewer:
    """Responsible for sending and receiving attestation updates.
    """
    """Responsible for sending and receiving attestation updates."""

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.assestations = hs.get_groups_attestation_signing()
@@ -141,9 +148,10 @@ class GroupAttestionRenewer:
            self._start_renew_attestations, 30 * 60 * 1000
        )

    async def on_renew_attestation(self, group_id, user_id, content):
        """When a remote updates an attestation
        """
    async def on_renew_attestation(
        self, group_id: str, user_id: str, content: JsonDict
    ) -> JsonDict:
        """When a remote updates an attestation"""
        attestation = content["attestation"]

        if not self.is_mine_id(group_id) and not self.is_mine_id(user_id):
@@ -157,12 +165,11 @@ class GroupAttestionRenewer:

        return {}

    def _start_renew_attestations(self):
    def _start_renew_attestations(self) -> None:
        return run_as_background_process("renew_attestations", self._renew_attestations)

    async def _renew_attestations(self):
        """Called periodically to check if we need to update any of our attestations
        """
    async def _renew_attestations(self) -> None:
        """Called periodically to check if we need to update any of our attestations"""

        now = self.clock.time_msec()

@@ -170,7 +177,7 @@ class GroupAttestionRenewer:
            now + UPDATE_ATTESTATION_TIME_MS
        )

        async def _renew_attestation(group_user: Tuple[str, str]):
        async def _renew_attestation(group_user: Tuple[str, str]) -> None:
            group_id, user_id = group_user
            try:
                if not self.is_mine_id(group_id):
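The renewer's overall shape is a periodic timer kicking off a named background task. Synapse schedules this via its clock's looping call plus `run_as_background_process`; the plain-asyncio sketch below is an illustrative stand-in for that scheduling, not the real implementation:

```python
import asyncio

RENEW_INTERVAL_SECS = 30 * 60


async def renew_attestations() -> None:
    # Query for attestations expiring soon and re-sign each one.
    await asyncio.sleep(0)  # placeholder for the real renewal work


async def renewal_loop() -> None:
    while True:
        # Shield the loop from failures: log and try again next tick.
        try:
            await renew_attestations()
        except Exception as e:
            print(f"attestation renewal failed: {e}")
        await asyncio.sleep(RENEW_INTERVAL_SECS)


# asyncio.run(renewal_loop())  # runs forever; started at homeserver boot
```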
@@ -16,11 +16,17 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
|
||||
from synapse.handlers.groups_local import GroupsLocalHandler
|
||||
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
|
||||
from synapse.types import GroupID, JsonDict, RoomID, UserID, get_domain_from_id
|
||||
from synapse.util.async_helpers import concurrently_execute
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.app.homeserver import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -32,8 +38,13 @@ logger = logging.getLogger(__name__)
|
||||
# TODO: Flairs
|
||||
|
||||
|
||||
# Note that the maximum lengths are somewhat arbitrary.
|
||||
MAX_SHORT_DESC_LEN = 1000
|
||||
MAX_LONG_DESC_LEN = 10000
|
||||
|
||||
|
||||
class GroupsServerWorkerHandler:
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.hs = hs
|
||||
self.store = hs.get_datastore()
|
||||
self.room_list_handler = hs.get_room_list_handler()
|
||||
@@ -48,16 +59,21 @@ class GroupsServerWorkerHandler:
|
||||
self.profile_handler = hs.get_profile_handler()
|
||||
|
||||
async def check_group_is_ours(
|
||||
self, group_id, requester_user_id, and_exists=False, and_is_admin=None
|
||||
):
|
||||
self,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
and_exists: bool = False,
|
||||
and_is_admin: Optional[str] = None,
|
||||
) -> Optional[dict]:
|
||||
"""Check that the group is ours, and optionally if it exists.
|
||||
|
||||
If group does exist then return group.
|
||||
|
||||
Args:
|
||||
group_id (str)
|
||||
and_exists (bool): whether to also check if group exists
|
||||
and_is_admin (str): whether to also check if given str is a user_id
|
||||
group_id: The group ID to check.
|
||||
requester_user_id: The user ID of the requester.
|
||||
and_exists: whether to also check if group exists
|
||||
and_is_admin: whether to also check if given str is a user_id
|
||||
that is an admin
|
||||
"""
|
||||
if not self.is_mine_id(group_id):
|
||||
@@ -80,7 +96,9 @@ class GroupsServerWorkerHandler:
|
||||
|
||||
return group
|
||||
|
||||
async def get_group_summary(self, group_id, requester_user_id):
|
||||
async def get_group_summary(
|
||||
self, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get the summary for a group as seen by requester_user_id.
|
||||
|
||||
The group summary consists of the profile of the room, and a curated
|
||||
@@ -113,6 +131,8 @@ class GroupsServerWorkerHandler:
|
||||
entry = await self.room_list_handler.generate_room_entry(
|
||||
room_id, len(joined_users), with_alias=False, allow_private=True
|
||||
)
|
||||
if entry is None:
|
||||
continue
|
||||
entry = dict(entry) # so we don't change what's cached
|
||||
entry.pop("room_id", None)
|
||||
|
||||
@@ -120,22 +140,22 @@ class GroupsServerWorkerHandler:
|
||||
|
||||
rooms.sort(key=lambda e: e.get("order", 0))
|
||||
|
||||
for entry in users:
|
||||
user_id = entry["user_id"]
|
||||
for user in users:
|
||||
user_id = user["user_id"]
|
||||
|
||||
if not self.is_mine_id(requester_user_id):
|
||||
attestation = await self.store.get_remote_attestation(group_id, user_id)
|
||||
if not attestation:
|
||||
continue
|
||||
|
||||
entry["attestation"] = attestation
|
||||
user["attestation"] = attestation
|
||||
else:
|
||||
entry["attestation"] = self.attestations.create_attestation(
|
||||
user["attestation"] = self.attestations.create_attestation(
|
||||
group_id, user_id
|
||||
)
|
||||
|
||||
user_profile = await self.profile_handler.get_profile_from_cache(user_id)
|
||||
entry.update(user_profile)
|
||||
user.update(user_profile)
|
||||
|
||||
users.sort(key=lambda e: e.get("order", 0))
|
||||
|
||||
@@ -158,46 +178,44 @@ class GroupsServerWorkerHandler:
|
||||
"user": membership_info,
|
||||
}
|
||||
|
||||
async def get_group_categories(self, group_id, requester_user_id):
|
||||
"""Get all categories in a group (as seen by user)
|
||||
"""
|
||||
async def get_group_categories(
|
||||
self, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get all categories in a group (as seen by user)"""
|
||||
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
categories = await self.store.get_group_categories(group_id=group_id)
|
||||
return {"categories": categories}
|
||||
|
||||
async def get_group_category(self, group_id, requester_user_id, category_id):
|
||||
"""Get a specific category in a group (as seen by user)
|
||||
"""
|
||||
async def get_group_category(
|
||||
self, group_id: str, requester_user_id: str, category_id: str
|
||||
) -> JsonDict:
|
||||
"""Get a specific category in a group (as seen by user)"""
|
||||
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
res = await self.store.get_group_category(
|
||||
return await self.store.get_group_category(
|
||||
group_id=group_id, category_id=category_id
|
||||
)
|
||||
|
||||
logger.info("group %s", res)
|
||||
|
||||
return res
|
||||
|
||||
async def get_group_roles(self, group_id, requester_user_id):
|
||||
"""Get all roles in a group (as seen by user)
|
||||
"""
|
||||
async def get_group_roles(self, group_id: str, requester_user_id: str) -> JsonDict:
|
||||
"""Get all roles in a group (as seen by user)"""
|
||||
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
roles = await self.store.get_group_roles(group_id=group_id)
|
||||
return {"roles": roles}
|
||||
|
||||
async def get_group_role(self, group_id, requester_user_id, role_id):
|
||||
"""Get a specific role in a group (as seen by user)
|
||||
"""
|
||||
async def get_group_role(
|
||||
self, group_id: str, requester_user_id: str, role_id: str
|
||||
) -> JsonDict:
|
||||
"""Get a specific role in a group (as seen by user)"""
|
||||
await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
res = await self.store.get_group_role(group_id=group_id, role_id=role_id)
|
||||
return res
|
||||
return await self.store.get_group_role(group_id=group_id, role_id=role_id)
|
||||
|
||||
async def get_group_profile(self, group_id, requester_user_id):
|
||||
"""Get the group profile as seen by requester_user_id
|
||||
"""
|
||||
async def get_group_profile(
|
||||
self, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get the group profile as seen by requester_user_id"""
|
||||
|
||||
await self.check_group_is_ours(group_id, requester_user_id)
|
||||
|
||||
@@ -218,7 +236,9 @@ class GroupsServerWorkerHandler:
|
||||
else:
|
||||
raise SynapseError(404, "Unknown group")
|
||||
|
||||
async def get_users_in_group(self, group_id, requester_user_id):
|
||||
async def get_users_in_group(
|
||||
self, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get the users in group as seen by requester_user_id.
|
||||
|
||||
The ordering is arbitrary at the moment
|
||||
@@ -267,7 +287,9 @@ class GroupsServerWorkerHandler:
|
||||
|
||||
return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
|
||||
|
||||
async def get_invited_users_in_group(self, group_id, requester_user_id):
|
||||
async def get_invited_users_in_group(
|
||||
self, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get the users that have been invited to a group as seen by requester_user_id.
|
||||
|
||||
The ordering is arbitrary at the moment
|
||||
@@ -297,7 +319,9 @@ class GroupsServerWorkerHandler:
|
||||
|
||||
return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
|
||||
|
||||
async def get_rooms_in_group(self, group_id, requester_user_id):
|
||||
async def get_rooms_in_group(
|
||||
self, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get the rooms in group as seen by requester_user_id
|
||||
|
||||
This returns rooms in order of decreasing number of joined users
|
||||
@@ -335,17 +359,21 @@ class GroupsServerWorkerHandler:
|
||||
|
||||
|
||||
class GroupsServerHandler(GroupsServerWorkerHandler):
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
# Ensure attestations get renewed
|
||||
hs.get_groups_attestation_renewer()
|
||||
|
||||
async def update_group_summary_room(
|
||||
self, group_id, requester_user_id, room_id, category_id, content
|
||||
):
|
||||
"""Add/update a room to the group summary
|
||||
"""
|
||||
self,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
room_id: str,
|
||||
category_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Add/update a room to the group summary"""
|
||||
await self.check_group_is_ours(
|
||||
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
|
||||
)
|
||||
@@ -367,10 +395,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
|
||||
return {}
|
||||
|
||||
async def delete_group_summary_room(
|
||||
self, group_id, requester_user_id, room_id, category_id
|
||||
):
|
||||
"""Remove a room from the summary
|
||||
"""
|
||||
self, group_id: str, requester_user_id: str, room_id: str, category_id: str
|
||||
) -> JsonDict:
|
||||
"""Remove a room from the summary"""
|
||||
await self.check_group_is_ours(
|
||||
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
|
||||
)
|
||||
@@ -381,7 +408,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
|
||||
|
||||
return {}
|
||||
|
||||
async def set_group_join_policy(self, group_id, requester_user_id, content):
|
||||
async def set_group_join_policy(
|
||||
self, group_id: str, requester_user_id: str, content: JsonDict
|
||||
) -> JsonDict:
|
||||
"""Sets the group join policy.
|
||||
|
||||
Currently supported policies are:
|
||||
@@ -401,10 +430,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

     async def update_group_category(
-        self, group_id, requester_user_id, category_id, content
-    ):
-        """Add/Update a group category
-        """
+        self, group_id: str, requester_user_id: str, category_id: str, content: JsonDict
+    ) -> JsonDict:
+        """Add/Update a group category"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -421,9 +449,10 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def delete_group_category(self, group_id, requester_user_id, category_id):
-        """Delete a group category
-        """
+    async def delete_group_category(
+        self, group_id: str, requester_user_id: str, category_id: str
+    ) -> JsonDict:
+        """Delete a group category"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -434,9 +463,10 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def update_group_role(self, group_id, requester_user_id, role_id, content):
-        """Add/update a role in a group
-        """
+    async def update_group_role(
+        self, group_id: str, requester_user_id: str, role_id: str, content: JsonDict
+    ) -> JsonDict:
+        """Add/update a role in a group"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -451,9 +481,10 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def delete_group_role(self, group_id, requester_user_id, role_id):
-        """Remove role from group
-        """
+    async def delete_group_role(
+        self, group_id: str, requester_user_id: str, role_id: str
+    ) -> JsonDict:
+        """Remove role from group"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -463,10 +494,14 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

     async def update_group_summary_user(
-        self, group_id, requester_user_id, user_id, role_id, content
-    ):
-        """Add/update a users entry in the group summary
-        """
+        self,
+        group_id: str,
+        requester_user_id: str,
+        user_id: str,
+        role_id: str,
+        content: JsonDict,
+    ) -> JsonDict:
+        """Add/update a users entry in the group summary"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -486,10 +521,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

     async def delete_group_summary_user(
-        self, group_id, requester_user_id, user_id, role_id
-    ):
-        """Remove a user from the group summary
-        """
+        self, group_id: str, requester_user_id: str, user_id: str, role_id: str
+    ) -> JsonDict:
+        """Remove a user from the group summary"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -500,26 +534,43 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def update_group_profile(self, group_id, requester_user_id, content):
-        """Update the group profile
-        """
+    async def update_group_profile(
+        self, group_id: str, requester_user_id: str, content: JsonDict
+    ) -> None:
+        """Update the group profile"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )

         profile = {}
-        for keyname in ("name", "avatar_url", "short_description", "long_description"):
+        for keyname, max_length in (
+            ("name", MAX_DISPLAYNAME_LEN),
+            ("avatar_url", MAX_AVATAR_URL_LEN),
+            ("short_description", MAX_SHORT_DESC_LEN),
+            ("long_description", MAX_LONG_DESC_LEN),
+        ):
             if keyname in content:
                 value = content[keyname]
                 if not isinstance(value, str):
-                    raise SynapseError(400, "%r value is not a string" % (keyname,))
+                    raise SynapseError(
+                        400,
+                        "%r value is not a string" % (keyname,),
+                        errcode=Codes.INVALID_PARAM,
+                    )
+                if len(value) > max_length:
+                    raise SynapseError(
+                        400,
+                        "Invalid %s parameter" % (keyname,),
+                        errcode=Codes.INVALID_PARAM,
+                    )
                 profile[keyname] = value

         await self.store.update_group_profile(group_id, profile)

-    async def add_room_to_group(self, group_id, requester_user_id, room_id, content):
-        """Add room to group
-        """
+    async def add_room_to_group(
+        self, group_id: str, requester_user_id: str, room_id: str, content: JsonDict
+    ) -> JsonDict:
+        """Add room to group"""
         RoomID.from_string(room_id)  # Ensure valid room id

         await self.check_group_is_ours(
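Aside: the rewritten loop pairs each profile field with a maximum length and rejects oversized or non-string values with errcode M_INVALID_PARAM. A rough sketch of the same validation pattern, with made-up limits and a plain ValueError standing in for SynapseError:

    from typing import Any, Dict

    FIELD_LIMITS = {  # illustrative values; the real limits are constants in Synapse
        "name": 256,
        "avatar_url": 1000,
        "short_description": 1000,
        "long_description": 10000,
    }

    def validate_profile(content: Dict[str, Any]) -> Dict[str, str]:
        profile = {}
        for keyname, max_length in FIELD_LIMITS.items():
            if keyname not in content:
                continue
            value = content[keyname]
            if not isinstance(value, str):
                raise ValueError("%r value is not a string" % (keyname,))
            if len(value) > max_length:
                raise ValueError("Invalid %s parameter" % (keyname,))
            profile[keyname] = value
        return profile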
@@ -533,10 +584,14 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

     async def update_room_in_group(
-        self, group_id, requester_user_id, room_id, config_key, content
-    ):
-        """Update room in group
-        """
+        self,
+        group_id: str,
+        requester_user_id: str,
+        room_id: str,
+        config_key: str,
+        content: JsonDict,
+    ) -> JsonDict:
+        """Update room in group"""
         RoomID.from_string(room_id)  # Ensure valid room id

         await self.check_group_is_ours(
@@ -554,9 +609,10 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def remove_room_from_group(self, group_id, requester_user_id, room_id):
-        """Remove room from group
-        """
+    async def remove_room_from_group(
+        self, group_id: str, requester_user_id: str, room_id: str
+    ) -> JsonDict:
+        """Remove room from group"""
         await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
@@ -565,13 +621,16 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def invite_to_group(self, group_id, user_id, requester_user_id, content):
-        """Invite user to group
-        """
+    async def invite_to_group(
+        self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
+    ) -> JsonDict:
+        """Invite user to group"""

         group = await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
         )
+        if not group:
+            raise SynapseError(400, "Group does not exist", errcode=Codes.BAD_STATE)

         # TODO: Check if user knocked

@@ -594,6 +653,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         if self.hs.is_mine_id(user_id):
             groups_local = self.hs.get_groups_local_handler()
+            assert isinstance(
+                groups_local, GroupsLocalHandler
+            ), "Workers cannot invites users to groups."
             res = await groups_local.on_invite(group_id, user_id, content)
             local_attestation = None
         else:
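Aside: the added assert does double duty. It fails fast if this code is ever reached on a worker process (where get_groups_local_handler returns a read-only variant), and it lets mypy narrow the handler's type so the on_invite call type-checks. A hedged sketch of the narrowing idiom with stand-in classes:

    from typing import Union

    class WorkerHandler:
        pass

    class FullHandler(WorkerHandler):
        async def on_invite(self, group_id: str, user_id: str) -> dict:
            return {"state": "invite"}

    async def invite(handler: Union[WorkerHandler, FullHandler]) -> dict:
        # After the assert, mypy treats handler as FullHandler, so on_invite
        # resolves; at runtime the assert trips if run on the wrong process.
        assert isinstance(handler, FullHandler), "Workers cannot invite users."
        return await handler.on_invite("+group:example.org", "@user:example.org")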
@@ -629,6 +691,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
                 local_attestation=local_attestation,
                 remote_attestation=remote_attestation,
             )
+            return {"state": "join"}
         elif res["state"] == "invite":
             await self.store.add_group_invite(group_id, user_id)
             return {"state": "invite"}
@@ -637,13 +700,17 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         else:
             raise SynapseError(502, "Unknown state returned by HS")

-    async def _add_user(self, group_id, user_id, content):
+    async def _add_user(
+        self, group_id: str, user_id: str, content: JsonDict
+    ) -> Optional[JsonDict]:
         """Add a user to a group based on a content dict.

         See accept_invite, join_group.
         """
         if not self.hs.is_mine_id(user_id):
-            local_attestation = self.attestations.create_attestation(group_id, user_id)
+            local_attestation = self.attestations.create_attestation(
+                group_id, user_id
+            )  # type: Optional[JsonDict]

             remote_attestation = content["attestation"]

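Aside: local_attestation is first assigned inside one branch, so mypy would infer too narrow a type and flag the None assigned on the other branch; the trailing "# type: Optional[JsonDict]" comment widens the inferred type. A minimal illustration of why the comment is needed (hypothetical function):

    from typing import Optional

    def make_attestation(is_remote_user: bool) -> Optional[dict]:
        if is_remote_user:
            # Without widening, mypy infers dict here and flags the None below.
            attestation = {"group_id": "+g:example.org"}  # type: Optional[dict]
        else:
            attestation = None
        return attestation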
@@ -667,7 +734,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return local_attestation

-    async def accept_invite(self, group_id, requester_user_id, content):
+    async def accept_invite(
+        self, group_id: str, requester_user_id: str, content: JsonDict
+    ) -> JsonDict:
         """User tries to accept an invite to the group.

         This is different from them asking to join, and so should error if no
@@ -686,7 +755,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {"state": "join", "attestation": local_attestation}

-    async def join_group(self, group_id, requester_user_id, content):
+    async def join_group(
+        self, group_id: str, requester_user_id: str, content: JsonDict
+    ) -> JsonDict:
         """User tries to join the group.

         This will error if the group requires an invite/knock to join
@@ -695,6 +766,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         group_info = await self.check_group_is_ours(
             group_id, requester_user_id, and_exists=True
         )
+        if not group_info:
+            raise SynapseError(404, "Group does not exist", errcode=Codes.NOT_FOUND)
         if group_info["join_policy"] != "open":
             raise SynapseError(403, "Group is not publicly joinable")

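Aside: check_group_is_ours can return None, so the new guard both produces a proper 404 (M_NOT_FOUND) and narrows the Optional before group_info["join_policy"] is subscripted. The shape of the idiom, sketched with a LookupError in place of SynapseError:

    from typing import Optional

    def get_join_policy(group_info: Optional[dict]) -> str:
        if not group_info:
            # Raising here narrows Optional[dict] to dict for the line below.
            raise LookupError("Group does not exist")
        return group_info.get("join_policy", "invite")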
@@ -702,26 +775,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {"state": "join", "attestation": local_attestation}

-    async def knock(self, group_id, requester_user_id, content):
-        """A user requests becoming a member of the group
-        """
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        raise NotImplementedError()
-
-    async def accept_knock(self, group_id, requester_user_id, content):
-        """Accept a users knock to the room.
-
-        Errors if the user hasn't knocked, rather than inviting them.
-        """
-
-        await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
-        raise NotImplementedError()
-
     async def remove_user_from_group(
-        self, group_id, user_id, requester_user_id, content
-    ):
+        self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
+    ) -> JsonDict:
         """Remove a user from the group; either a user is leaving or an admin
         kicked them.
         """
@@ -743,6 +799,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         if is_kick:
             if self.hs.is_mine_id(user_id):
                 groups_local = self.hs.get_groups_local_handler()
+                assert isinstance(
+                    groups_local, GroupsLocalHandler
+                ), "Workers cannot remove users from groups."
                 await groups_local.user_removed_from_group(group_id, user_id, {})
             else:
                 await self.transport_client.remove_user_from_group_notification(
@@ -759,14 +818,15 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {}

-    async def create_group(self, group_id, requester_user_id, content):
-        group = await self.check_group_is_ours(group_id, requester_user_id)
-
+    async def create_group(
+        self, group_id: str, requester_user_id: str, content: JsonDict
+    ) -> JsonDict:
         logger.info("Attempting to create group with ID: %r", group_id)

         # parsing the id into a GroupID validates it.
         group_id_obj = GroupID.from_string(group_id)

+        group = await self.check_group_is_ours(group_id, requester_user_id)
         if group:
             raise SynapseError(400, "Group already exists")

@@ -811,7 +871,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
             local_attestation = self.attestations.create_attestation(
                 group_id, requester_user_id
-            )
+            )  # type: Optional[JsonDict]
         else:
             local_attestation = None
             remote_attestation = None
@@ -834,15 +894,14 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         return {"group_id": group_id}

-    async def delete_group(self, group_id, requester_user_id):
+    async def delete_group(self, group_id: str, requester_user_id: str) -> None:
         """Deletes a group, kicking out all current members.

         Only group admins or server admins can call this request

         Args:
-            group_id (str)
-            request_user_id (str)
-
+            group_id: The group ID to delete.
+            requester_user_id: The user requesting to delete the group.
         """

         await self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
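Aside: besides typing the signature, this hunk moves the parameter types out of the docstring (and fixes the request_user_id typo along the way). Once types live in the signature, the Args section only needs descriptions, roughly:

    async def delete_group_example(group_id: str, requester_user_id: str) -> None:
        """Deletes a group, kicking out all current members.

        Args:
            group_id: The group ID to delete.
            requester_user_id: The user requesting to delete the group.
        """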
@@ -865,6 +924,9 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         async def _kick_user_from_group(user_id):
             if self.hs.is_mine_id(user_id):
                 groups_local = self.hs.get_groups_local_handler()
+                assert isinstance(
+                    groups_local, GroupsLocalHandler
+                ), "Workers cannot kick users from groups."
                 await groups_local.user_removed_from_group(group_id, user_id, {})
             else:
                 await self.transport_client.remove_user_from_group_notification(
@@ -896,9 +958,8 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
         await self.store.delete_group(group_id)


-def _parse_join_policy_from_contents(content):
-    """Given a content for a request, return the specified join policy or None
-    """
+def _parse_join_policy_from_contents(content: JsonDict) -> Optional[str]:
+    """Given a content for a request, return the specified join policy or None"""

     join_policy_dict = content.get("m.join_policy")
     if join_policy_dict:
@@ -907,9 +968,8 @@ def _parse_join_policy_from_contents(content):
     return None


-def _parse_join_policy_dict(join_policy_dict):
-    """Given a dict for the "m.join_policy" config return the join policy specified
-    """
+def _parse_join_policy_dict(join_policy_dict: JsonDict) -> str:
+    """Given a dict for the "m.join_policy" config return the join policy specified"""
     join_policy_type = join_policy_dict.get("type")
     if not join_policy_type:
         return "invite"
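Taken together, the two parsers mean a request may omit m.join_policy entirely (None, i.e. leave the policy unchanged), while an m.join_policy dict without a type defaults to "invite". A runnable sketch of the pair, omitting the validation the full functions perform:

    from typing import Any, Dict, Optional

    def parse_join_policy(content: Dict[str, Any]) -> Optional[str]:
        join_policy_dict = content.get("m.join_policy")
        if join_policy_dict:
            return parse_join_policy_dict(join_policy_dict)
        return None

    def parse_join_policy_dict(join_policy_dict: Dict[str, Any]) -> str:
        join_policy_type = join_policy_dict.get("type")
        if not join_policy_type:
            return "invite"
        return join_policy_type

    assert parse_join_policy({"m.join_policy": {"type": "open"}}) == "open"
    assert parse_join_policy({}) is None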
@@ -919,7 +979,7 @@ def _parse_join_policy_dict(join_policy_dict):
     return join_policy_type


-def _parse_visibility_from_contents(content):
+def _parse_visibility_from_contents(content: JsonDict) -> bool:
     """Given a content for a request parse out whether the entity should be
     public or not
     """
@@ -933,7 +993,7 @@ def _parse_visibility_from_contents(content):
     return is_public


-def _parse_visibility_dict(visibility):
+def _parse_visibility_dict(visibility: JsonDict) -> bool:
     """Given a dict for the "m.visibility" config return if the entity should
     be public or not
     """

@@ -73,7 +73,9 @@ class AcmeHandler:
                 "Listening for ACME requests on %s:%i", host, self.hs.config.acme_port
             )
             try:
-                self.reactor.listenTCP(self.hs.config.acme_port, srv, interface=host)
+                self.reactor.listenTCP(
+                    self.hs.config.acme_port, srv, backlog=50, interface=host
+                )
             except twisted.internet.error.CannotListenError as e:
                 check_bind_error(e, host, bind_addresses)

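Note: this change is behaviour-preserving, since 50 is already the default backlog in Twisted's listenTCP(port, factory, backlog=50, interface=''); spelling it out just makes the keyword arguments explicit. A minimal Twisted listener showing the same call shape (port and interface are arbitrary here):

    from twisted.internet import protocol, reactor

    class Echo(protocol.Protocol):
        def dataReceived(self, data):
            self.transport.write(data)

    factory = protocol.Factory.forProtocol(Echo)
    reactor.listenTCP(8043, factory, backlog=50, interface="127.0.0.1")
    # reactor.run() would start the event loop and begin accepting connections.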
@@ -203,13 +203,11 @@ class AdminHandler(BaseHandler):


 class ExfiltrationWriter(metaclass=abc.ABCMeta):
-    """Interface used to specify how to write exported data.
-    """
+    """Interface used to specify how to write exported data."""

     @abc.abstractmethod
     def write_events(self, room_id: str, events: List[EventBase]) -> None:
-        """Write a batch of events for a room.
-        """
+        """Write a batch of events for a room."""
         raise NotImplementedError()

     @abc.abstractmethod

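Note: ExfiltrationWriter is an abstract base class; the admin data-export code supplies a concrete writer. A hypothetical minimal subclass (the real interface has further abstract methods not shown in this hunk, and takes EventBase objects rather than plain values):

    import abc
    from typing import Any, List

    class ExfiltrationWriter(metaclass=abc.ABCMeta):
        """Interface used to specify how to write exported data."""

        @abc.abstractmethod
        def write_events(self, room_id: str, events: List[Any]) -> None:
            """Write a batch of events for a room."""
            raise NotImplementedError()

    class PrintWriter(ExfiltrationWriter):
        """Toy writer that dumps each exported event to stdout."""

        def write_events(self, room_id: str, events: List[Any]) -> None:
            for event in events:
                print(room_id, event)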
@@ -290,7 +290,9 @@ class ApplicationServicesHandler:
                 if not interested:
                     continue
                 presence_events, _ = await presence_source.get_new_events(
-                    user=user, service=service, from_key=from_key,
+                    user=user,
+                    service=service,
+                    from_key=from_key,
                 )
                 time_now = self.clock.time_msec()
                 events.extend(

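Note: this last hunk is pure formatting, likely from a Black version bump. The call already ended in a trailing comma, and Black's "magic trailing comma" rule then keeps such calls exploded to one keyword argument per line, for example (hypothetical function):

    def get_new_events(user: str, service: str, from_key: int) -> list:
        return [(user, service, from_key)]

    # Black keeps this call one-argument-per-line because of the trailing comma:
    events = get_new_events(
        user="@alice:example.org",
        service="appservice-irc",
        from_key=0,
    )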
Some files were not shown because too many files have changed in this diff.