Compare commits
137 commits, michaelkay, ... v1.31.0
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # this script is run by buildkite in a plain `xenial` container; it installs the
 # minimal requirements for tox and hands over to the py35-old tox environment.
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 #
 # Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
 # with additional dependencies needed for the test (such as coverage or the PostgreSQL
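Many hunks in this compare make the same change: the hard-coded `#!/bin/bash` shebang becomes a `$PATH` lookup via `env` (see changelog entry #9689 below). A minimal sketch of why, assuming a system where bash is not installed at `/bin/bash`:

```sh
#!/usr/bin/env bash
# `env` resolves bash from $PATH, so this also runs on systems (e.g. NixOS or
# the BSDs) where bash lives somewhere other than /bin/bash.
set -e

# Show which bash binary env actually resolved:
command -v bash
```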
.git-blame-ignore-revs (new file, 8 lines)

@@ -0,0 +1,8 @@
+# Black reformatting (#5482).
+32e7c9e7f20b57dd081023ac42d6931a8da9b3a3
+
+# Target Python 3.5 with black (#8664).
+aff1eb7c671b0a3813407321d2702ec46c71fa56
+
+# Update black to 20.8b1 (#9381).
+0a00b7ff14890987f09112a2ae696c61001e6cf1
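Git 2.23+ can consume this file so that `git blame` skips the bulk auto-formatting commits listed above. A sketch of the wiring (the blamed path is illustrative):

```sh
# Apply the ignore list to every `git blame` in this clone:
git config blame.ignoreRevsFile .git-blame-ignore-revs

# Or pass it for a one-off blame:
git blame --ignore-revs-file .git-blame-ignore-revs synapse/handlers/presence.py
```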
.gitignore (vendored, 3 changed lines)

@@ -6,13 +6,14 @@
 *.egg
 *.egg-info
 *.lock
-*.pyc
+*.py[cod]
 *.snap
 *.tac
 _trial_temp/
 _trial_temp*/
 /out
 .DS_Store
+__pycache__/
 
 # stuff that is likely to exist when you run a server locally
 /*.db
CHANGES.md (260 changed lines)

@@ -1,9 +1,265 @@
-Synapse 1.xx.0
-==============
+Synapse 1.31.0 (2021-04-06)
+===========================
+
+**Note:** As announced in v1.25.0, and in line with the deprecation policy for platform dependencies, this is the last release to support Python 3.5 and PostgreSQL 9.5. Future versions of Synapse will require Python 3.6+ and PostgreSQL 9.6+, as per our [deprecation policy](docs/deprecation_policy.md).
+
+This is also the last release that the Synapse team will be publishing packages for Debian Stretch and Ubuntu Xenial.
+
+
+Improved Documentation
+----------------------
+
+- Add a document describing the deprecation policy for platform dependencies. ([\#9723](https://github.com/matrix-org/synapse/issues/9723))
+
+
+Internal Changes
+----------------
+
+- Revert using `dmypy run` in lint script. ([\#9720](https://github.com/matrix-org/synapse/issues/9720))
+- Pin flake8-bugbear's version. ([\#9734](https://github.com/matrix-org/synapse/issues/9734))
+
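The `dmypy run` entries above and below (#9692, #9701, #9720) refer to mypy's daemon mode, which keeps a type-checking server alive between invocations so repeated runs are incremental. A sketch, assuming mypy is installed in the dev virtualenv:

```sh
# First run starts the daemon and does a full check; later runs are fast.
dmypy run -- synapse/ tests/

# Stop the daemon if its cache gets confused (e.g. after switching branches):
dmypy stop
```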
+
+Synapse 1.31.0rc1 (2021-03-30)
+==============================
+
+Features
+--------
+
+- Add support to OpenID Connect login for requiring attributes on the `userinfo` response. Contributed by Hubbe King. ([\#9609](https://github.com/matrix-org/synapse/issues/9609))
+- Add initial experimental support for a "space summary" API. ([\#9643](https://github.com/matrix-org/synapse/issues/9643), [\#9652](https://github.com/matrix-org/synapse/issues/9652), [\#9653](https://github.com/matrix-org/synapse/issues/9653))
+- Add support for the busy presence state as described in [MSC3026](https://github.com/matrix-org/matrix-doc/pull/3026). ([\#9644](https://github.com/matrix-org/synapse/issues/9644))
+- Add support for credentials for proxy authentication in the `HTTPS_PROXY` environment variable. ([\#9657](https://github.com/matrix-org/synapse/issues/9657))
+
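For the proxy-credentials feature (#9657), the username and password are read from the userinfo part of the proxy URL. An illustrative setting (host, port and credentials are placeholders):

```sh
# Synapse's proxied HTTP clients will send these credentials for
# proxy authentication:
export HTTPS_PROXY="http://user:secret@proxy.example.com:3128"
```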
+
+Bugfixes
+--------
+
+- Fix a longstanding bug that could cause issues when editing a reply to a message. ([\#9585](https://github.com/matrix-org/synapse/issues/9585))
+- Fix the `/capabilities` endpoint to return `m.change_password` as disabled if the local password database is not used for authentication. Contributed by @dklimpel. ([\#9588](https://github.com/matrix-org/synapse/issues/9588))
+- Check if local passwords are enabled before setting them for the user. ([\#9636](https://github.com/matrix-org/synapse/issues/9636))
+- Fix a bug where federation sending can stall due to `concurrent access` database exceptions when it falls behind. ([\#9639](https://github.com/matrix-org/synapse/issues/9639))
+- Fix a bug introduced in Synapse 1.30.1 which meant the suggested `pip` incantation to install an updated `cryptography` was incorrect. ([\#9699](https://github.com/matrix-org/synapse/issues/9699))
+
+
+Updates to the Docker image
+---------------------------
+
+- Speed up Docker builds and make it nicer to test against Complement while developing (install all dependencies before copying the project). ([\#9610](https://github.com/matrix-org/synapse/issues/9610))
+- Include [opencontainers labels](https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys) in the Docker image. ([\#9612](https://github.com/matrix-org/synapse/issues/9612))
+
+
+Improved Documentation
+----------------------
+
+- Clarify that `register_new_matrix_user` is present also when installed via non-pip package. ([\#9074](https://github.com/matrix-org/synapse/issues/9074))
+- Update source install documentation to mention platform prerequisites before the source install steps. ([\#9667](https://github.com/matrix-org/synapse/issues/9667))
+- Improve worker documentation for fallback/web auth endpoints. ([\#9679](https://github.com/matrix-org/synapse/issues/9679))
+- Update the sample configuration for OIDC authentication. ([\#9695](https://github.com/matrix-org/synapse/issues/9695))
+
+
+Internal Changes
+----------------
+
+- Preparatory steps for removing redundant `outlier` data from `event_json.internal_metadata` column. ([\#9411](https://github.com/matrix-org/synapse/issues/9411))
+- Add type hints to the caching module. ([\#9442](https://github.com/matrix-org/synapse/issues/9442))
+- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9499](https://github.com/matrix-org/synapse/issues/9499), [\#9659](https://github.com/matrix-org/synapse/issues/9659))
+- Add additional type hints to the Homeserver object. ([\#9631](https://github.com/matrix-org/synapse/issues/9631), [\#9638](https://github.com/matrix-org/synapse/issues/9638), [\#9675](https://github.com/matrix-org/synapse/issues/9675), [\#9681](https://github.com/matrix-org/synapse/issues/9681))
+- Only save remote cross-signing and device keys if they're different from the current ones. ([\#9634](https://github.com/matrix-org/synapse/issues/9634))
+- Rename storage function to fix spelling and not conflict with another function's name. ([\#9637](https://github.com/matrix-org/synapse/issues/9637))
+- Improve performance of federation catch up by sending the latest events in the room to the remote, rather than just the last event sent by the local server. ([\#9640](https://github.com/matrix-org/synapse/issues/9640), [\#9664](https://github.com/matrix-org/synapse/issues/9664))
+- In the `federation_client` commandline client, stop automatically adding the URL prefix, so that servlets on other prefixes can be tested. ([\#9645](https://github.com/matrix-org/synapse/issues/9645))
+- In the `federation_client` commandline client, handle inline `signing_key`s in `homeserver.yaml`. ([\#9647](https://github.com/matrix-org/synapse/issues/9647))
+- Fixed some antipattern issues to improve code quality. ([\#9649](https://github.com/matrix-org/synapse/issues/9649))
+- Add a storage method for pulling all current user presence state from the database. ([\#9650](https://github.com/matrix-org/synapse/issues/9650))
+- Import `HomeServer` from the proper module. ([\#9665](https://github.com/matrix-org/synapse/issues/9665))
+- Increase default join ratelimiting burst rate. ([\#9674](https://github.com/matrix-org/synapse/issues/9674))
+- Add type hints to third party event rules and visibility modules. ([\#9676](https://github.com/matrix-org/synapse/issues/9676))
+- Bump mypy-zope to 0.2.13 to fix "Cannot determine consistent method resolution order (MRO)" errors when running mypy a second time. ([\#9678](https://github.com/matrix-org/synapse/issues/9678))
+- Use interpreter from `$PATH` via `/usr/bin/env` instead of absolute paths in various scripts. ([\#9689](https://github.com/matrix-org/synapse/issues/9689))
+- Make it possible to use `dmypy`. ([\#9692](https://github.com/matrix-org/synapse/issues/9692))
+- Suppress "CryptographyDeprecationWarning: int_from_bytes is deprecated". ([\#9698](https://github.com/matrix-org/synapse/issues/9698))
+- Use `dmypy run` in lint script for improved performance in type-checking while developing. ([\#9701](https://github.com/matrix-org/synapse/issues/9701))
+- Fix undetected mypy error when using Python 3.6. ([\#9703](https://github.com/matrix-org/synapse/issues/9703))
+- Fix type-checking CI on develop. ([\#9709](https://github.com/matrix-org/synapse/issues/9709))
+
+
+Synapse 1.30.1 (2021-03-26)
+===========================
+
+This release is identical to Synapse 1.30.0, with the exception of explicitly
+setting a minimum version of Python's Cryptography library to ensure that users
+of Synapse are protected from the recent [OpenSSL security advisories](https://mta.openssl.org/pipermail/openssl-announce/2021-March/000198.html),
+especially CVE-2021-3449.
+
+Note that Cryptography defaults to bundling its own statically linked copy of
+OpenSSL, which means that you may not be protected by your operating system's
+security updates.
+
+It's also worth noting that Cryptography no longer supports Python 3.5, so
+admins deploying to older environments may not be protected against this or
+future vulnerabilities. Synapse will be dropping support for Python 3.5 at the
+end of March.
+
+
+Updates to the Docker image
+---------------------------
+
+- Ensure that the docker container has up to date versions of openssl. ([\#9697](https://github.com/matrix-org/synapse/issues/9697))
+
+
+Internal Changes
+----------------
+
+- Enforce that `cryptography` dependency is up to date to ensure it has the most recent openssl patches. ([\#9697](https://github.com/matrix-org/synapse/issues/9697))
+
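The practical upshot of 1.30.1 for pip-based installs is upgrading `cryptography` inside Synapse's virtualenv so its bundled OpenSSL is patched. A sketch (the version pin shown is illustrative; use the pin from your Synapse release):

```sh
source ~/synapse/env/bin/activate
pip install --upgrade "cryptography>=3.4.7"
python -c 'import cryptography; print(cryptography.__version__)'
```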
+
+Synapse 1.30.0 (2021-03-22)
+===========================
+
+Note that this release deprecates the ability for appservices to
+call `POST /_matrix/client/r0/register` without the body parameter `type`. Appservice
+developers should use a `type` value of `m.login.application_service` as
+per [the spec](https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions).
+In future releases, calling this endpoint with an access token - but without a `m.login.application_service`
+type - will fail.
+
+
+No significant changes.
+
+
+Synapse 1.30.0rc1 (2021-03-16)
+==============================
+
+Features
+--------
+
+- Add prometheus metrics for number of users successfully registering and logging in. ([\#9510](https://github.com/matrix-org/synapse/issues/9510), [\#9511](https://github.com/matrix-org/synapse/issues/9511), [\#9573](https://github.com/matrix-org/synapse/issues/9573))
+- Add `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time` prometheus metrics, which monitor federation delays by reporting the timestamps of messages sent and received to a set of remote servers. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
+- Add support for generating JSON Web Tokens dynamically for use as OIDC client secrets. ([\#9549](https://github.com/matrix-org/synapse/issues/9549))
+- Optimise handling of incomplete room history for incoming federation. ([\#9601](https://github.com/matrix-org/synapse/issues/9601))
+- Finalise support for allowing clients to pick an SSO Identity Provider ([MSC2858](https://github.com/matrix-org/matrix-doc/pull/2858)). ([\#9617](https://github.com/matrix-org/synapse/issues/9617))
+- Tell spam checker modules about the SSO IdP a user registered through if one was used. ([\#9626](https://github.com/matrix-org/synapse/issues/9626))
+
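The `synapse_federation_last_sent_pdu_time` / `synapse_federation_last_received_pdu_time` gauges (#9540) are exposed on Synapse's Prometheus endpoint. A quick way to eyeball them, assuming a metrics listener is enabled on port 9000:

```sh
curl -s http://localhost:9000/_synapse/metrics | grep federation_last_
```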
+
+Bugfixes
+--------
+
+- Fix long-standing bug when generating thumbnails for some images with transparency: `TypeError: cannot unpack non-iterable int object`. ([\#9473](https://github.com/matrix-org/synapse/issues/9473))
+- Purge chain cover indexes for events that were purged prior to Synapse v1.29.0. ([\#9542](https://github.com/matrix-org/synapse/issues/9542), [\#9583](https://github.com/matrix-org/synapse/issues/9583))
+- Fix bug where federation requests were not correctly retried on 5xx responses. ([\#9567](https://github.com/matrix-org/synapse/issues/9567))
+- Fix re-activating an account via the admin API when local passwords are disabled. ([\#9587](https://github.com/matrix-org/synapse/issues/9587))
+- Fix a bug introduced in Synapse 1.20 which caused incoming federation transactions to stack up, causing slow recovery from outages. ([\#9597](https://github.com/matrix-org/synapse/issues/9597))
+- Fix a bug introduced in v1.28.0 where the OpenID Connect callback endpoint could error with a `MacaroonInitException`. ([\#9620](https://github.com/matrix-org/synapse/issues/9620))
+- Fix Internal Server Error on `GET /_synapse/client/saml2/authn_response` request. ([\#9623](https://github.com/matrix-org/synapse/issues/9623))
+
+
+Updates to the Docker image
+---------------------------
+
+- Make use of an improved malloc implementation (`jemalloc`) in the docker image. ([\#8553](https://github.com/matrix-org/synapse/issues/8553))
+
+
+Improved Documentation
+----------------------
+
+- Add relayd entry to reverse proxy example configurations. ([\#9508](https://github.com/matrix-org/synapse/issues/9508))
+- Improve the SAML2 upgrade notes for 1.27.0. ([\#9550](https://github.com/matrix-org/synapse/issues/9550))
+- Link to the "List user's media" admin API from the media admin API docs. ([\#9571](https://github.com/matrix-org/synapse/issues/9571))
+- Clarify the spam checker modules documentation example to mention that `parse_config` is a required method. ([\#9580](https://github.com/matrix-org/synapse/issues/9580))
+- Clarify the sample configuration for `stats` settings. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
+
+
+Deprecations and Removals
+-------------------------
+
+- The `synapse_federation_last_sent_pdu_age` and `synapse_federation_last_received_pdu_age` prometheus metrics have been removed. They are replaced by `synapse_federation_last_sent_pdu_time` and `synapse_federation_last_received_pdu_time`. ([\#9540](https://github.com/matrix-org/synapse/issues/9540))
+- Registering an Application Service user without using the `m.login.application_service` login type will be unsupported in an upcoming Synapse release. ([\#9559](https://github.com/matrix-org/synapse/issues/9559))
+
+
+Internal Changes
+----------------
+
+- Add tests to ResponseCache. ([\#9458](https://github.com/matrix-org/synapse/issues/9458))
+- Add type hints to purge room and server notice admin API. ([\#9520](https://github.com/matrix-org/synapse/issues/9520))
+- Add extra logging to ObservableDeferred when callbacks throw exceptions. ([\#9523](https://github.com/matrix-org/synapse/issues/9523))
+- Fix incorrect type hints. ([\#9528](https://github.com/matrix-org/synapse/issues/9528), [\#9543](https://github.com/matrix-org/synapse/issues/9543), [\#9591](https://github.com/matrix-org/synapse/issues/9591), [\#9608](https://github.com/matrix-org/synapse/issues/9608), [\#9618](https://github.com/matrix-org/synapse/issues/9618))
+- Add an additional test for purging a room. ([\#9541](https://github.com/matrix-org/synapse/issues/9541))
+- Add a `.git-blame-ignore-revs` file with the hashes of auto-formatting. ([\#9560](https://github.com/matrix-org/synapse/issues/9560))
+- Increase the threshold before which outbound federation to a server goes into "catch up" mode, which is expensive for the remote server to handle. ([\#9561](https://github.com/matrix-org/synapse/issues/9561))
+- Fix spurious errors reported by the `config-lint.sh` script. ([\#9562](https://github.com/matrix-org/synapse/issues/9562))
+- Fix type hints and tests for BlacklistingAgentWrapper and BlacklistingReactorWrapper. ([\#9563](https://github.com/matrix-org/synapse/issues/9563))
+- Do not have mypy ignore type hints from unpaddedbase64. ([\#9568](https://github.com/matrix-org/synapse/issues/9568))
+- Improve efficiency of calculating the auth chain in large rooms. ([\#9576](https://github.com/matrix-org/synapse/issues/9576))
+- Convert `synapse.types.Requester` to an `attrs` class. ([\#9586](https://github.com/matrix-org/synapse/issues/9586))
+- Add logging for redis connection setup. ([\#9590](https://github.com/matrix-org/synapse/issues/9590))
+- Improve logging when processing incoming transactions. ([\#9596](https://github.com/matrix-org/synapse/issues/9596))
+- Remove unused `stats.retention` setting, and emit a warning if stats are disabled. ([\#9604](https://github.com/matrix-org/synapse/issues/9604))
+- Prevent attempting to bundle aggregations for state events in /context APIs. ([\#9619](https://github.com/matrix-org/synapse/issues/9619))
+
+
+Synapse 1.29.0 (2021-03-08)
+===========================
+
+Note that synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see [UPGRADE.rst](UPGRADE.rst#upgrading-to-v1290) for more details on this change.
+
+
+No significant changes.
+
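A quick way to check that a reverse proxy in front of Synapse 1.29.0+ is setting the expected header is to replay what the proxy should send (hostnames are illustrative):

```sh
# Synapse should treat this request as having arrived over https:
curl -H 'X-Forwarded-Proto: https' -H 'Host: matrix.example.com' \
    http://localhost:8008/_matrix/client/versions
```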
+
+Synapse 1.29.0rc1 (2021-03-04)
+==============================
+
+Features
+--------
+
+- Add rate limiters to cross-user key sharing requests. ([\#8957](https://github.com/matrix-org/synapse/issues/8957))
+- Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel. ([\#8978](https://github.com/matrix-org/synapse/issues/8978))
+- Add some configuration settings to make users' profile data more private. ([\#9203](https://github.com/matrix-org/synapse/issues/9203))
+- The `no_proxy` and `NO_PROXY` environment variables are now respected in proxied HTTP clients, with the lowercase form taking precedence if both are present. Additionally, the lowercase `https_proxy` environment variable is now respected in proxied HTTP clients on top of the existing support for the uppercase `HTTPS_PROXY` form, and takes precedence if both are present. Contributed by Timothy Leung. ([\#9372](https://github.com/matrix-org/synapse/issues/9372))
+- Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users. ([\#9383](https://github.com/matrix-org/synapse/issues/9383), [\#9385](https://github.com/matrix-org/synapse/issues/9385))
+- Add support for regenerating thumbnails if they have been deleted but the original image is still stored. ([\#9438](https://github.com/matrix-org/synapse/issues/9438))
+- Add support for `X-Forwarded-Proto` header when using a reverse proxy. ([\#9472](https://github.com/matrix-org/synapse/issues/9472), [\#9501](https://github.com/matrix-org/synapse/issues/9501), [\#9512](https://github.com/matrix-org/synapse/issues/9512), [\#9539](https://github.com/matrix-org/synapse/issues/9539))
+
+
+Bugfixes
+--------
+
+- Fix a bug where users' pushers were not all deleted when they deactivated their account. ([\#9285](https://github.com/matrix-org/synapse/issues/9285), [\#9516](https://github.com/matrix-org/synapse/issues/9516))
+- Fix a bug where a lot of unnecessary presence updates were sent when joining a room. ([\#9402](https://github.com/matrix-org/synapse/issues/9402))
+- Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results. ([\#9416](https://github.com/matrix-org/synapse/issues/9416))
+- Fix a bug in single sign-on which could cause a "No session cookie found" error. ([\#9436](https://github.com/matrix-org/synapse/issues/9436))
+- Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined. ([\#9440](https://github.com/matrix-org/synapse/issues/9440))
+- Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`. ([\#9449](https://github.com/matrix-org/synapse/issues/9449))
+- Fix deleting pushers when using sharded pushers. ([\#9465](https://github.com/matrix-org/synapse/issues/9465), [\#9466](https://github.com/matrix-org/synapse/issues/9466), [\#9479](https://github.com/matrix-org/synapse/issues/9479), [\#9536](https://github.com/matrix-org/synapse/issues/9536))
+- Fix missing startup checks for the consistency of certain PostgreSQL sequences. ([\#9470](https://github.com/matrix-org/synapse/issues/9470))
+- Fix a long-standing bug where the media repository could leak file descriptors while previewing media. ([\#9497](https://github.com/matrix-org/synapse/issues/9497))
+- Properly purge the event chain cover index when purging history. ([\#9498](https://github.com/matrix-org/synapse/issues/9498))
+- Fix missing chain cover index due to a schema delta not being applied correctly. Only affected servers that ran development versions. ([\#9503](https://github.com/matrix-org/synapse/issues/9503))
+- Fix a bug introduced in v1.25.0 where `/_synapse/admin/join/` would fail when given a room alias. ([\#9506](https://github.com/matrix-org/synapse/issues/9506))
+- Prevent presence background jobs from running when presence is disabled. ([\#9530](https://github.com/matrix-org/synapse/issues/9530))
+- Fix rare edge case that caused a background update to fail if the server had rejected an event that had duplicate auth events. ([\#9537](https://github.com/matrix-org/synapse/issues/9537))
+
+
+Improved Documentation
+----------------------
+
+- Update the example systemd config to propagate reloads to individual units. ([\#9463](https://github.com/matrix-org/synapse/issues/9463))
+
+
+Internal Changes
+----------------
+
+- Add documentation and type hints to `parse_duration`. ([\#9432](https://github.com/matrix-org/synapse/issues/9432))
+- Remove vestiges of `uploads_path` configuration setting. ([\#9462](https://github.com/matrix-org/synapse/issues/9462))
+- Add a comment about systemd-python. ([\#9464](https://github.com/matrix-org/synapse/issues/9464))
+- Test that we require validated email for email pushers. ([\#9496](https://github.com/matrix-org/synapse/issues/9496))
+- Allow python to generate bytecode for synapse. ([\#9502](https://github.com/matrix-org/synapse/issues/9502))
+- Fix incorrect type hints. ([\#9515](https://github.com/matrix-org/synapse/issues/9515), [\#9518](https://github.com/matrix-org/synapse/issues/9518))
+- Add type hints to device and event report admin API. ([\#9519](https://github.com/matrix-org/synapse/issues/9519))
+- Add type hints to user admin API. ([\#9521](https://github.com/matrix-org/synapse/issues/9521))
+- Bump the versions of mypy and mypy-zope used for static type checking. ([\#9529](https://github.com/matrix-org/synapse/issues/9529))
+
+
 Synapse 1.28.0 (2021-02-25)
 ===========================
 
INSTALL.md (38 changed lines)

@@ -6,7 +6,7 @@ There are 3 steps to follow under **Installation Instructions**.
 - [Choosing your server name](#choosing-your-server-name)
 - [Installing Synapse](#installing-synapse)
   - [Installing from source](#installing-from-source)
-    - [Platform-Specific Instructions](#platform-specific-instructions)
+    - [Platform-specific prerequisites](#platform-specific-prerequisites)
       - [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
       - [ArchLinux](#archlinux)
       - [CentOS/Fedora](#centosfedora)

@@ -38,6 +38,7 @@ There are 3 steps to follow under **Installation Instructions**.
   - [URL previews](#url-previews)
+- [Troubleshooting Installation](#troubleshooting-installation)
 
 
 ## Choosing your server name
 
 It is important to choose the name for your server before you install Synapse,

@@ -60,17 +61,14 @@ that your email address is probably `user@example.com` rather than
 
 (Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
 
+When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.
+
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
 - Python 3.5.2 or later, up to Python 3.9.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
-Synapse is written in Python but some of the libraries it uses are written in
-C. So before we can install Synapse itself we need a working C compiler and the
-header files for Python C extensions. See [Platform-Specific
-Instructions](#platform-specific-instructions) for information on installing
-these on various platforms.
-
 To install the Synapse homeserver run:
 

@@ -128,7 +126,11 @@ source env/bin/activate
 synctl start
 ```
 
-#### Platform-Specific Instructions
+#### Platform-specific prerequisites
+
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.
 
 ##### Debian/Ubuntu/Raspbian
 

@@ -526,14 +528,24 @@ email will be disabled.
 
 The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
 
-Alternatively you can do so from the command line if you have installed via pip.
-
-This can be done as follows:
-
-```sh
-$ source ~/synapse/env/bin/activate
-$ synctl start # if not already running
-$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
-```
+Alternatively, you can do so from the command line. This can be done as follows:
+
+1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
+   installed via a prebuilt package, `register_new_matrix_user` should already be
+   on the search path):
+   ```sh
+   cd ~/synapse
+   source env/bin/activate
+   synctl start # if not already running
+   ```
+2. Run the following command:
+   ```sh
+   register_new_matrix_user -c homeserver.yaml http://localhost:8008
+   ```
 
 This will prompt you to add details for the new user, and will then connect to
 the running Synapse to create the new user. For example:
 ```
 New user localpart: erikj
 Password:
 Confirm password:
MANIFEST.in

@@ -20,9 +20,10 @@ recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
-include tests/http/ca.crt
-include tests/http/ca.key
-include tests/http/server.key
+recursive-include tests *.pem
+recursive-include tests *.p8
+recursive-include tests *.crt
+recursive-include tests *.key
 
 recursive-include synapse/res *
 recursive-include synapse/static *.css
README.rst (16 changed lines)

@@ -183,8 +183,9 @@ Using a reverse proxy with Synapse
 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
 `Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
-`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
-`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
+`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
+`HAProxy <https://www.haproxy.org/>`_ or
+`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.

@@ -313,6 +314,15 @@ Testing with SyTest is recommended for verifying that changes related to the
 Client-Server API are functioning correctly. See the `installation instructions
 <https://github.com/matrix-org/sytest#installing>`_ for details.
 
+
+Platform dependencies
+=====================
+
+Synapse uses a number of platform dependencies such as Python and PostgreSQL,
+and aims to follow supported upstream versions. See the
+`<docs/deprecation_policy.md>`_ document for more details.
+
+
 Troubleshooting
 ===============

@@ -388,7 +398,7 @@ likely cause. The misbehavior can be worked around by setting
 People can't accept room invitations from me
 --------------------------------------------
 
-The typical failure mode here is that you send an invitation to someone 
+The typical failure mode here is that you send an invitation to someone
 to join a room or direct chat, but when they go to accept it, they get an
 error (typically along the lines of "Invalid signature"). They might see
 something like the following in their logs::
UPGRADE.rst (16 changed lines)

@@ -98,9 +98,12 @@ will log a warning on each received request.
 
 To avoid the warning, administrators using a reverse proxy should ensure that
 the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
-indicate the protocol used by the client. See the [reverse proxy
-documentation](docs/reverse_proxy.md), where the example configurations have
-been updated to show how to set this header.
+indicate the protocol used by the client.
+
+Synapse also requires the `Host` header to be preserved.
+
+See the `reverse proxy documentation <docs/reverse_proxy.md>`_, where the
+example configurations have been updated to show how to set these headers.
 
 (Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
 sets `X-Forwarded-Proto` by default.)

@@ -124,6 +127,13 @@ This version changes the URI used for callbacks from OAuth2 and SAML2 identity p
 need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
 "ACS location" (also known as "allowed callback URLs") at the identity provider.
 
+The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
+``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
+provider uses this property to validate or otherwise identify Synapse, its configuration
+will need to be updated to use the new URL. Alternatively you could create a new, separate
+"EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
+the existing "EntityDescriptor" as they were.
+
 Changes to HTML templates
 -------------------------
The following hunks each delete a one-line changelog.d news fragment; the entries were folded into CHANGES.md above.

@@ -1 +0,0 @@
-Temporarily drop cross-user m.room_key_request to_device messages over performance concerns.
@@ -1 +0,0 @@
-Add rate limiters to cross-user key sharing requests.
@@ -1 +0,0 @@
-Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel.
@@ -1 +0,0 @@
-Add some configuration settings to make users' profile data more private.
@@ -1 +0,0 @@
-Fix a bug where users' pushers were not all deleted when they deactivated their account.
@@ -1 +0,0 @@
-Added a fix that invalidates cache for empty timed-out sync responses.
@@ -1 +0,0 @@
-Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users.
@@ -1 +0,0 @@
-Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users.
@@ -1 +0,0 @@
-Fix a bug where a lot of unnecessary presence updates were sent when joining a room.
@@ -1 +0,0 @@
-Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results.
@@ -1 +0,0 @@
-Add documentation and type hints to `parse_duration`.
@@ -1 +0,0 @@
-Fix a bug in single sign-on which could cause a "No session cookie found" error.
@@ -1 +0,0 @@
-Add support for regenerating thumbnails if they have been deleted but the original image is still stored.
@@ -1 +0,0 @@
-Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined.
@@ -1 +0,0 @@
-Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`.
@@ -1 +0,0 @@
-Remove vestiges of `uploads_path` configuration setting.
@@ -1 +0,0 @@
-Update the example systemd config to propagate reloads to individual units.
@@ -1 +0,0 @@
-Add a comment about systemd-python.
@@ -1 +0,0 @@
-Fix deleting pushers when using sharded pushers.
@@ -1 +0,0 @@
-Fix deleting pushers when using sharded pushers.
@@ -1 +0,0 @@
-Fix missing startup checks for the consistency of certain PostgreSQL sequences.
@@ -1 +0,0 @@
-Add support for `X-Forwarded-Proto` header when using a reverse proxy.
@@ -1 +0,0 @@
-Fix deleting pushers when using sharded pushers.
@@ -1 +0,0 @@
-Test that we require validated email for email pushers.
@@ -1 +0,0 @@
-Add support for `X-Forwarded-Proto` header when using a reverse proxy.
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # this script will use the api:
 # https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 DOMAIN=yourserver.tld
 # add this user as admin in your home server:
debian/build_virtualenv (vendored, 6 changed lines)

@@ -58,10 +58,10 @@ trap "rm -r $tmpdir" EXIT
 cp -r tests "$tmpdir"
 
 PYTHONPATH="$tmpdir" \
-    "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
+    "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
 
 # build the config file
-"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
+"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
         --config-dir="/etc/matrix-synapse" \
         --data-dir="/var/lib/matrix-synapse" |
     perl -pe '

@@ -87,7 +87,7 @@ PYTHONPATH="$tmpdir" \
 ' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
 
 # build the log config file
-"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
+"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
         --output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
 
 # add a dependency on the right version of python to substvars.
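These hunks drop Python's `-B` flag (see the "Remove the python -B flag" entry in the Debian changelog below), which had been suppressing bytecode generation in the packaged scripts. What the flag does, in one line each:

```sh
python3 -B -c 'import sys; print(sys.dont_write_bytecode)'  # True: no .pyc written
python3 -c 'import sys; print(sys.dont_write_bytecode)'     # False: bytecode cached
```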
debian/changelog (vendored, 28 changed lines)

@@ -1,3 +1,31 @@
+matrix-synapse-py3 (1.31.0) stable; urgency=medium
+
+  * New synapse release 1.31.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Apr 2021 13:08:29 +0100
+
+matrix-synapse-py3 (1.30.1) stable; urgency=medium
+
+  * New synapse release 1.30.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 26 Mar 2021 12:01:28 +0000
+
+matrix-synapse-py3 (1.30.0) stable; urgency=medium
+
+  * New synapse release 1.30.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 22 Mar 2021 13:15:34 +0000
+
+matrix-synapse-py3 (1.29.0) stable; urgency=medium
+
+  [ Jonathan de Jong ]
+  * Remove the python -B flag (don't generate bytecode) in scripts and documentation.
+
+  [ Synapse Packaging team ]
+  * New synapse release 1.29.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 08 Mar 2021 13:51:50 +0000
+
 matrix-synapse-py3 (1.28.0) stable; urgency=medium
 
   * New synapse release 1.28.0.
debian/synctl.1 (vendored, 2 changed lines)

@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
 .
 .nf
 
-$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
+$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
 .
 .fi
 .

debian/synctl.ronn (vendored, 2 changed lines)

@@ -41,7 +41,7 @@ process.
 
 Configuration file may be generated as follows:
 
-    $ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
+    $ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
 
 ## ENVIRONMENT
 
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 set -e
 

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
 

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 DIR="$( cd "$( dirname "$0" )" && pwd )"
 
docker/Dockerfile

@@ -18,6 +18,11 @@ ARG PYTHON_VERSION=3.8
 ###
 FROM docker.io/python:${PYTHON_VERSION}-slim as builder
 
+LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
+LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
+LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
+LABEL org.opencontainers.image.licenses='Apache-2.0'
+
 # install the OS build deps
 RUN apt-get update && apt-get install -y \
     build-essential \

@@ -28,33 +33,32 @@ RUN apt-get update && apt-get install -y \
     libwebp-dev \
     libxml++2.6-dev \
     libxslt1-dev \
+    openssl \
     rustc \
     zlib1g-dev \
-    && rm -rf /var/lib/apt/lists/*
+ && rm -rf /var/lib/apt/lists/*
 
+# Build dependencies that are not available as wheels, to speed up rebuilds
+RUN pip install --prefix="/install" --no-warn-script-location \
+        cryptography \
+        frozendict \
+        jaeger-client \
+        opentracing \
+        # Match the version constraints of Synapse
+        "prometheus_client>=0.4.0" \
+        psycopg2 \
+        pycparser \
+        pyrsistent \
+        pyyaml \
+        simplejson \
+        threadloop \
+        thrift
+
-# now install synapse and all of the python deps to /install.
-COPY synapse /synapse/synapse/
+# Copy just what we need to pip install
 COPY scripts /synapse/scripts/
 COPY MANIFEST.in README.rst setup.py synctl /synapse/
+COPY synapse/__init__.py /synapse/synapse/__init__.py
+COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
 
+# To speed up rebuilds, install all of the dependencies before we copy over
+# the whole synapse project so that this layer in the Docker cache can be
+# used while you develop on the source
+#
+# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
 RUN pip install --prefix="/install" --no-warn-script-location \
-    /synapse[all]
+    /synapse[all]
+
+# Copy over the rest of the project
+COPY synapse /synapse/synapse/
+
+# Install the synapse package itself and all of its children packages.
+#
+# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
+RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
 
 ###
 ### Stage 1: runtime

@@ -69,7 +73,10 @@ RUN apt-get update && apt-get install -y \
     libpq5 \
     libwebp6 \
     xmlsec1 \
-    && rm -rf /var/lib/apt/lists/*
+    libjemalloc2 \
+    libssl-dev \
+    openssl \
+ && rm -rf /var/lib/apt/lists/*
 
 COPY --from=builder /install /usr/local
 COPY ./docker/start.py /start.py

@@ -82,4 +89,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
 ENTRYPOINT ["/start.py"]
 
 HEALTHCHECK --interval=1m --timeout=5s \
-  CMD curl -fSs http://localhost:8008/health || exit 1
+    CMD curl -fSs http://localhost:8008/health || exit 1
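The COPY/RUN reordering above exists so the heavy dependency-install layer stays cached while you iterate on the Synapse source. The usual build command from the repository root:

```sh
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```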
docker/README.md

@@ -204,3 +204,8 @@ healthcheck:
   timeout: 10s
   retries: 3
 ```
+
+## Using jemalloc
+
+Jemalloc is embedded in the image and will be used instead of the default allocator.
+You can read about jemalloc by reading the Synapse [README](../README.md)
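A quick way to confirm the allocator is actually preloaded in a running container (the container name is illustrative):

```sh
# LD_PRELOAD maps libjemalloc into PID 1; grep its memory maps:
docker exec synapse grep -m1 jemalloc /proc/1/maps
```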
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # The script to build the Debian package, as run inside the Docker image.
 

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # This script runs the PostgreSQL tests inside a Docker container. It expects
 # the relevant source files to be mounted into /src (done automatically by the
docker/start.py

@@ -3,6 +3,7 @@
 import codecs
 import glob
 import os
+import platform
 import subprocess
 import sys

@@ -213,6 +214,13 @@ def main(args, environ):
     if "-m" not in args:
         args = ["-m", synapse_worker] + args
 
+    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
+
+    if os.path.isfile(jemallocpath):
+        environ["LD_PRELOAD"] = jemallocpath
+    else:
+        log("Could not find %s, will not use" % (jemallocpath,))
+
     # if there are no config files passed to synapse, try adding the default file
     if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
         config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")

@@ -248,9 +256,9 @@ running with 'migrate_config'. See the README for more details.
         args = ["python"] + args
         if ownership is not None:
             args = ["gosu", ownership] + args
-            os.execv("/usr/sbin/gosu", args)
+            os.execve("/usr/sbin/gosu", args, environ)
         else:
-            os.execv("/usr/local/bin/python", args)
+            os.execve("/usr/local/bin/python", args, environ)
 
 
 if __name__ == "__main__":
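The `os.execv` to `os.execve` change hands the `environ` mapping, including the `LD_PRELOAD` entry set above, explicitly to the exec'd process instead of relying on it being `os.environ`. The shell equivalent of the resulting setup (the library path is illustrative, for x86_64):

```sh
# Run Synapse under jemalloc without touching its code: preload the allocator
# through the environment, which is exactly what start.py propagates.
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 \
    python -m synapse.app.homeserver --help
```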
docs/admin_api/media_admin_api.md

@@ -1,5 +1,7 @@
 # Contents
-- [List all media in a room](#list-all-media-in-a-room)
+- [Querying media](#querying-media)
+  * [List all media in a room](#list-all-media-in-a-room)
+  * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
 - [Quarantine media](#quarantine-media)
   * [Quarantining media by ID](#quarantining-media-by-id)
   * [Quarantining media in a room](#quarantining-media-in-a-room)

@@ -10,7 +12,11 @@
   * [Delete local media by date or size](#delete-local-media-by-date-or-size)
 - [Purge Remote Media API](#purge-remote-media-api)
 
-# List all media in a room
+# Querying media
+
+These APIs allow extracting media information from the homeserver.
+
+## List all media in a room
 
 This API gets a list of known media in a room.
 However, it only shows media from unencrypted events or rooms.

@@ -36,6 +42,12 @@ The API returns a JSON body like the following:
 }
 ```
 
+## List all media uploaded by a user
+
+Listing all media that has been uploaded by a local user can be achieved through
+the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
+Admin API.
+
 # Quarantine media
 
 Quarantining media means that it is marked as inaccessible by users. It applies
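The room-media listing described above is served at `/_synapse/admin/v1/room/<room_id>/media` and requires an admin access token. A sketch of calling it (token and room ID are placeholders; note the room ID must be URL-encoded):

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/room/%21roomid%3Aexample.com/media"
```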
docs/deprecation_policy.md (new file, 33 lines)

@@ -0,0 +1,33 @@
+Deprecation Policy for Platform Dependencies
+============================================
+
+Synapse has a number of platform dependencies, including Python and PostgreSQL.
+This document outlines the policy towards which versions we support, and when we
+drop support for versions in the future.
+
+
+Policy
+------
+
+Synapse follows the upstream support life cycles for Python and PostgreSQL,
+i.e. when a version reaches End of Life Synapse will withdraw support for that
+version in future releases.
+
+Details on the upstream support life cycles for Python and PostgreSQL are
+documented at https://endoflife.date/python and
+https://endoflife.date/postgresql.
+
+
+Context
+-------
+
+It is important for system admins to have a clear understanding of the platform
+requirements of Synapse and its deprecation policies so that they can
+effectively plan upgrading their infrastructure ahead of time. This is
+especially important in contexts where upgrading the infrastructure requires
+auditing and approval from a security team, or where otherwise upgrading is a
+long process.
+
+By following the upstream support life cycles Synapse can ensure that its
+dependencies continue to get security patches, while not requiring system admins
+to constantly update their platform dependencies to the latest versions.
docs/openid.md

@@ -226,7 +226,7 @@ Synapse config:
 oidc_providers:
   - idp_id: github
     idp_name: Github
-    idp_brand: "org.matrix.github"  # optional: styling hint for clients
+    idp_brand: "github"  # optional: styling hint for clients
     discover: false
     issuer: "https://github.com/"
     client_id: "your-client-id" # TO BE FILLED

@@ -252,7 +252,7 @@ oidc_providers:
 oidc_providers:
   - idp_id: google
     idp_name: Google
-    idp_brand: "org.matrix.google"  # optional: styling hint for clients
+    idp_brand: "google"  # optional: styling hint for clients
     issuer: "https://accounts.google.com/"
     client_id: "your-client-id" # TO BE FILLED
     client_secret: "your-client-secret" # TO BE FILLED

@@ -299,7 +299,7 @@ Synapse config:
 oidc_providers:
   - idp_id: gitlab
     idp_name: Gitlab
-    idp_brand: "org.matrix.gitlab"  # optional: styling hint for clients
+    idp_brand: "gitlab"  # optional: styling hint for clients
     issuer: "https://gitlab.com/"
     client_id: "your-client-id" # TO BE FILLED
     client_secret: "your-client-secret" # TO BE FILLED

@@ -334,7 +334,7 @@ Synapse config:
 ```yaml
   - idp_id: facebook
     idp_name: Facebook
-    idp_brand: "org.matrix.facebook"  # optional: styling hint for clients
+    idp_brand: "facebook"  # optional: styling hint for clients
     discover: false
     issuer: "https://facebook.com"
     client_id: "your-client-id" # TO BE FILLED

@@ -386,7 +386,7 @@ oidc_providers:
     config:
       subject_claim: "id"
       localpart_template: "{{ user.login }}"
-      display_name_template: "{{ user.full_name }}" 
+      display_name_template: "{{ user.full_name }}"
 ```
 
 ### XWiki

@@ -401,8 +401,7 @@ oidc_providers:
     idp_name: "XWiki"
     issuer: "https://myxwikihost/xwiki/oidc/"
     client_id: "your-client-id" # TO BE FILLED
-    # Needed until https://github.com/matrix-org/synapse/issues/9212 is fixed
-    client_secret: "dontcare"
+    client_auth_method: none
     scopes: ["openid", "profile"]
     user_profile_method: "userinfo_endpoint"
     user_mapping_provider:

@@ -410,3 +409,40 @@ oidc_providers:
         localpart_template: "{{ user.preferred_username }}"
         display_name_template: "{{ user.name }}"
 ```
 
+## Apple
+
+Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
+
+You will need to create a new "Services ID" for SiWA, and create and download a
+private key with "SiWA" enabled.
+
+As well as the private key file, you will need:
+ * Client ID: the "identifier" you gave the "Services ID"
+ * Team ID: a 10-character ID associated with your developer account.
+ * Key ID: the 10-character identifier for the key.
+
+https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
+documentation on setting up SiWA.
+
+The synapse config will look like this:
+
+```yaml
+  - idp_id: apple
+    idp_name: Apple
+    issuer: "https://appleid.apple.com"
+    client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
+    client_auth_method: "client_secret_post"
+    client_secret_jwt_key:
+      key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
+      jwt_header:
+        alg: ES256
+        kid: "KEYIDCODE" # Set to the 10-char Key ID
+      jwt_payload:
+        iss: TEAMIDCODE # Set to the 10-char Team ID
+    scopes: ["name", "email", "openid"]
+    authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
+    user_mapping_provider:
+      config:
+        email_template: "{{ user.email }}"
+```
docs/reverse_proxy.md

@@ -3,8 +3,9 @@
 It is recommended to put a reverse proxy such as
 [nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
 [Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
-[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
-[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
+[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
+[HAProxy](https://www.haproxy.org/) or
+[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
 of doing so is that it means that you can expose the default https port
 (443) to Matrix clients without needing to run Synapse with root
 privileges.

@@ -53,6 +54,8 @@ server {
         proxy_pass http://localhost:8008;
         proxy_set_header X-Forwarded-For $remote_addr;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header Host $host;
 
         # Nginx by default only allows file uploads up to 1M in size
         # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
         client_max_body_size 50M;

@@ -101,10 +104,11 @@ example.com:8448 {
 ```
 <VirtualHost *:443>
     SSLEngine on
-    ServerName matrix.example.com;
+    ServerName matrix.example.com
 
+    RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
     AllowEncodedSlashes NoDecode
     ProxyPreserveHost on
     ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
    ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
    ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon

@@ -113,7 +117,7 @@ example.com:8448 {
 
 <VirtualHost *:8448>
     SSLEngine on
-    ServerName example.com;
+    ServerName example.com
 
     RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
     AllowEncodedSlashes NoDecode

@@ -132,6 +136,8 @@ example.com:8448 {
 </IfModule>
 ```
 
+**NOTE 3**: Missing `ProxyPreserveHost on` can lead to a redirect loop.
+
 ### HAProxy
 
 ```

@@ -160,6 +166,52 @@ backend matrix
     server matrix 127.0.0.1:8008
 ```
 
+### Relayd
+
+```
+table <webserver>    { 127.0.0.1 }
+table <matrixserver> { 127.0.0.1 }
+
+http protocol "https" {
+    tls { no tlsv1.0, ciphers "HIGH" }
+    tls keypair "example.com"
+    match header set "X-Forwarded-For"   value "$REMOTE_ADDR"
+    match header set "X-Forwarded-Proto" value "https"
+
+    # set CORS header for .well-known/matrix/server, .well-known/matrix/client
+    # httpd does not support setting headers, so do it here
+    match request path "/.well-known/matrix/*" tag "matrix-cors"
+    match response tagged "matrix-cors" header set "Access-Control-Allow-Origin" value "*"
+
+    pass quick path "/_matrix/*" forward to <matrixserver>
+    pass quick path "/_synapse/client/*" forward to <matrixserver>
+
+    # pass on non-matrix traffic to webserver
+    pass forward to <webserver>
+}
+
+relay "https_traffic" {
+    listen on egress port 443 tls
+    protocol "https"
+    forward to <matrixserver> port 8008 check tcp
+    forward to <webserver> port 8080 check tcp
+}
+
+http protocol "matrix" {
+    tls { no tlsv1.0, ciphers "HIGH" }
+    tls keypair "example.com"
+    block
+    pass quick path "/_matrix/*" forward to <matrixserver>
+    pass quick path "/_synapse/client/*" forward to <matrixserver>
+}
+
+relay "matrix_federation" {
+    listen on egress port 8448 tls
+    protocol "matrix"
+    forward to <matrixserver> port 8008 check tcp
+}
+```
+
 ## Homeserver Configuration
 
 You will also want to set `bind_addresses: ['127.0.0.1']` and
@@ -89,8 +89,7 @@ pid_file: DATADIR/homeserver.pid
|
||||
# Whether to require authentication to retrieve profile data (avatars,
|
||||
# display names) of other users through the client API. Defaults to
|
||||
# 'false'. Note that profile data is also available via the federation
|
||||
# API, so this setting is of limited value if federation is enabled on
|
||||
# the server.
|
||||
# API, unless allow_profile_lookup_over_federation is set to false.
|
||||
#
|
||||
#require_auth_for_profile_requests: true
|
||||
|
||||
@@ -870,10 +869,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
||||
#rc_joins:
|
||||
# local:
|
||||
# per_second: 0.1
|
||||
# burst_count: 3
|
||||
# burst_count: 10
|
||||
# remote:
|
||||
# per_second: 0.01
|
||||
# burst_count: 3
|
||||
# burst_count: 10
|
||||
#
|
||||
#rc_3pid_validation:
|
||||
# per_second: 0.003
|
||||
@@ -1759,6 +1758,9 @@ saml2_config:
|
||||
# Note that, if this is changed, users authenticating via that provider
|
||||
# will no longer be recognised as the same user!
|
||||
#
|
||||
# (Use "oidc" here if you are migrating from an old "oidc_config"
|
||||
# configuration.)
|
||||
#
|
||||
# idp_name: A user-facing name for this identity provider, which is used to
|
||||
# offer the user a choice of login mechanisms.
|
||||
#
|
||||
@@ -1780,7 +1782,26 @@ saml2_config:
|
||||
#
|
||||
# client_id: Required. oauth2 client id to use.
|
||||
#
|
||||
# client_secret: Required. oauth2 client secret to use.
|
||||
# client_secret: oauth2 client secret to use. May be omitted if
|
||||
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
|
||||
#
|
||||
# client_secret_jwt_key: Alternative to client_secret: details of a key used
|
||||
# to create a JSON Web Token to be used as an OAuth2 client secret. If
|
||||
# given, must be a dictionary with the following properties:
|
||||
#
|
||||
# key: a pem-encoded signing key. Must be a suitable key for the
|
||||
# algorithm specified. Required unless 'key_file' is given.
|
||||
#
|
||||
# key_file: the path to file containing a pem-encoded signing key file.
|
||||
# Required unless 'key' is given.
|
||||
#
|
||||
# jwt_header: a dictionary giving properties to include in the JWT
|
||||
# header. Must include the key 'alg', giving the algorithm used to
|
||||
# sign the JWT, such as "ES256", using the JWA identifiers in
|
||||
# RFC7518.
|
||||
#
|
||||
# jwt_payload: an optional dictionary giving properties to include in
|
||||
# the JWT payload. Normally this should include an 'iss' key.
|
||||
#
|
||||
# client_auth_method: auth method to use when exchanging the token. Valid
|
||||
# values are 'client_secret_basic' (default), 'client_secret_post' and
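As an illustrative aside (not part of the diff): what `client_secret_jwt_key` describes amounts to minting a short-lived JWT and sending it wherever the static client secret would normally go, which is the client-authentication scheme Sign in with Apple expects. A minimal sketch, assuming `authlib` is available (the library Synapse's `oidc` extra pulls in); the helper name and the one-hour lifetime are invented for illustration:

```python
import time

from authlib.jose import jwt  # assumption: authlib is installed


def make_client_secret(key_pem: str, jwt_header: dict, jwt_payload: dict) -> str:
    """Mint a JWT to stand in for a static OAuth2 client secret.

    `jwt_header` must carry 'alg' (e.g. "ES256"); 'iat'/'exp' are added here so
    each minted secret is short-lived rather than a long-lived shared string.
    """
    now = int(time.time())
    payload = {"iat": now, "exp": now + 3600, **jwt_payload}
    # jwt.encode signs with the algorithm named in the header, using key_pem
    return jwt.encode(jwt_header, payload, key_pem).decode("ascii")


# Hypothetical usage matching the Apple example above:
# secret = make_client_secret(
#     key_pem=open("/path/to/AuthKey_KEYIDCODE.p8").read(),
#     jwt_header={"alg": "ES256", "kid": "KEYIDCODE"},
#     jwt_payload={"iss": "TEAMIDCODE"},
# )
```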
@@ -1855,6 +1876,24 @@ saml2_config:
#       which is set to the claims returned by the UserInfo Endpoint and/or
#       in the ID Token.
#
# It is possible to configure Synapse to only allow logins if certain attributes
# match particular values in the OIDC userinfo. The requirements can be listed under
# `attribute_requirements` as shown below. All of the listed attributes must
# match for the login to be permitted. Additional attributes can be added to
# userinfo by expanding the `scopes` section of the OIDC config to retrieve
# additional information from the OIDC provider.
#
# If the OIDC claim is a list, then the attribute must match any value in the list.
# Otherwise, it must exactly match the value of the claim. Using the example
# below, the `family_name` claim MUST be "Stephensson", but the `groups`
# claim MUST contain "admin".
#
# attribute_requirements:
#   - attribute: family_name
#     value: "Stephensson"
#   - attribute: groups
#     value: "admin"
#
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
# for information on how to configure these options.
#
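As an illustrative aside (not part of the diff): the rule described above (a list-valued claim must contain the required value, a scalar claim must equal it exactly) is compact enough to sketch. This is a hypothetical illustration of the documented behaviour, not Synapse's internal implementation:

```python
from typing import Any, Mapping, Sequence


def userinfo_meets_requirements(
    userinfo: Mapping[str, Any],
    requirements: Sequence[Mapping[str, str]],
) -> bool:
    """True only if every {'attribute': ..., 'value': ...} requirement matches."""
    for req in requirements:
        claim = userinfo.get(req["attribute"])
        if isinstance(claim, (list, tuple)):
            # list-valued claim: the required value must appear in it
            if req["value"] not in claim:
                return False
        elif claim != req["value"]:
            # scalar claim: must match exactly
            return False
    return True


# The example from the comments above: family_name must equal "Stephensson",
# and the groups list must contain "admin".
assert userinfo_meets_requirements(
    {"family_name": "Stephensson", "groups": ["admin", "dev"]},
    [
        {"attribute": "family_name", "value": "Stephensson"},
        {"attribute": "groups", "value": "admin"},
    ],
)
```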
@@ -1887,34 +1926,9 @@ oidc_providers:
#     localpart_template: "{{ user.login }}"
#     display_name_template: "{{ user.name }}"
#     email_template: "{{ user.email }}"

# For use with Keycloak
#
#- idp_id: keycloak
#  idp_name: Keycloak
#  issuer: "https://127.0.0.1:8443/auth/realms/my_realm_name"
#  client_id: "synapse"
#  client_secret: "copy secret generated in Keycloak UI"
#  scopes: ["openid", "profile"]

# For use with Github
#
#- idp_id: github
#  idp_name: Github
#  idp_brand: org.matrix.github
#  discover: false
#  issuer: "https://github.com/"
#  client_id: "your-client-id" # TO BE FILLED
#  client_secret: "your-client-secret" # TO BE FILLED
#  authorization_endpoint: "https://github.com/login/oauth/authorize"
#  token_endpoint: "https://github.com/login/oauth/access_token"
#  userinfo_endpoint: "https://api.github.com/user"
#  scopes: ["read:user"]
#  user_mapping_provider:
#    config:
#      subject_claim: "id"
#      localpart_template: "{{ user.login }}"
#      display_name_template: "{{ user.name }}"
#  attribute_requirements:
#    - attribute: userGroup
#      value: "synapseUsers"


# Enable Central Authentication Service (CAS) for registration and login.
@@ -2627,19 +2641,20 @@ user_directory:


# Local statistics collection. Used in populating the room directory.
# Settings for local room and user statistics collection. See
# docs/room_and_user_statistics.md.
#
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#
#stats:
#  enabled: true
#  bucket_size: 1d
#  retention: 1y
stats:
  # Uncomment the following to disable room and user statistics. Note that doing
  # so may cause certain features (such as the room directory) not to work
  # correctly.
  #
  #enabled: false

  # The size of each timeslice in the room_stats_historical and
  # user_stats_historical tables, as a time period. Defaults to "1d".
  #
  #bucket_size: 1h


# Server Notices room configuration

@@ -14,6 +14,7 @@ The Python class is instantiated with two objects:
* An instance of `synapse.module_api.ModuleApi`.

It then implements methods which return a boolean to alter behavior in Synapse.
All the methods must be defined.

There's a generic method for checking every event (`check_event_for_spam`), as
well as some specific methods:
@@ -24,6 +25,7 @@ well as some specific methods:
* `user_may_publish_room`
* `check_username_for_spam`
* `check_registration_for_spam`
* `check_media_file_for_spam`

The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
@@ -31,6 +33,10 @@ are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `ModuleApi` class provides a way for the custom spam checker class to
call back into the homeserver internals.

Additionally, a `parse_config` method is mandatory and receives the plugin config
dictionary. After parsing, it must return an object which will be
passed to `__init__` later.

### Example

```python
@@ -41,6 +47,10 @@ class ExampleSpamChecker:
        self.config = config
        self.api = api

    @staticmethod
    def parse_config(config):
        return config

    async def check_event_for_spam(self, foo):
        return False  # allow all events

@@ -59,7 +69,13 @@ class ExampleSpamChecker:
    async def check_username_for_spam(self, user_profile):
        return False  # allow all usernames

    async def check_registration_for_spam(self, email_threepid, username, request_info):
    async def check_registration_for_spam(
        self,
        email_threepid,
        username,
        request_info,
        auth_provider_id,
    ):
        return RegistrationBehaviour.ALLOW  # allow all registrations

    async def check_media_file_for_spam(self, file_wrapper, file_info):

@@ -232,7 +232,6 @@ expressions:
    # Registration/login requests
    ^/_matrix/client/(api/v1|r0|unstable)/login$
    ^/_matrix/client/(r0|unstable)/register$
    ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$

    # Event sending requests
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact
@@ -276,7 +275,7 @@ using):

Ensure that all SSO logins go to a single process.
For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).

Note that a HTTP listener with `client` and `federation` resources must be

mypy.ini
@@ -1,12 +1,13 @@
[mypy]
namespace_packages = True
plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = silent
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
show_traceback = True
mypy_path = stubs
warn_unreachable = True
local_partial_types = True

# To find all folders that pass mypy you run:
#
@@ -20,8 +21,9 @@ files =
  synapse/crypto,
  synapse/event_auth.py,
  synapse/events/builder.py,
  synapse/events/validator.py,
  synapse/events/spamcheck.py,
  synapse/events/third_party_rules.py,
  synapse/events/validator.py,
  synapse/federation,
  synapse/groups,
  synapse/handlers,
@@ -38,6 +40,7 @@ files =
  synapse/push,
  synapse/replication,
  synapse/rest,
  synapse/secrets.py,
  synapse/server.py,
  synapse/server_notices,
  synapse/spam_checker_api,
@@ -69,7 +72,9 @@ files =
  synapse/util/async_helpers.py,
  synapse/util/caches,
  synapse/util/metrics.py,
  synapse/util/macaroons.py,
  synapse/util/stringutils.py,
  synapse/visibility.py,
  tests/replication,
  tests/test_utils,
  tests/handlers/test_password_providers.py,
@@ -116,9 +121,6 @@ ignore_missing_imports = True
[mypy-saml2.*]
ignore_missing_imports = True

[mypy-unpaddedbase64]
ignore_missing_imports = True

[mypy-canonicaljson]
ignore_missing_imports = True

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# A script which checks that an appropriate news file has been added on this
# branch.

@@ -1,10 +1,15 @@
#!/bin/bash
#!/usr/bin/env bash
# Find linting errors in Synapse's default config file.
# Exits with 0 if there are no problems, or another code otherwise.

# cd to the root of the repository
cd `dirname $0`/..

# Restore backup of sample config upon script exit
trap "mv docs/sample_config.yaml.bak docs/sample_config.yaml" EXIT

# Fix non-lowercase true/false values
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
rm docs/sample_config.yaml.bak

# Check if anything changed
git diff --exit-code docs/sample_config.yaml
diff docs/sample_config.yaml docs/sample_config.yaml.bak

@@ -22,8 +22,8 @@ import sys
from typing import Any, Optional
from urllib import parse as urlparse

import nacl.signing
import requests
import signedjson.key
import signedjson.types
import srvlookup
import yaml
@@ -44,18 +44,6 @@ def encode_base64(input_bytes):
    return output_string


def decode_base64(input_string):
    """Decode a base64 string to bytes inferring padding from the length of the
    string."""

    input_bytes = input_string.encode("ascii")
    input_len = len(input_bytes)
    padding = b"=" * (3 - ((input_len + 3) % 4))
    output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
    output_bytes = base64.b64decode(input_bytes + padding)
    return output_bytes[:output_len]
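As an illustrative aside (not part of the diff, which removes this helper in favour of `signedjson`'s own key handling): the function restores the `=` padding that unpadded base64 omits, then truncates the decode to the exact byte count implied by the input length. A worked check of that arithmetic, using a copy of the helper and an invented input:

```python
import base64


def decode_base64(input_string):
    # verbatim copy of the removed helper, kept here only for the check below
    input_bytes = input_string.encode("ascii")
    input_len = len(input_bytes)
    padding = b"=" * (3 - ((input_len + 3) % 4))
    output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
    return base64.b64decode(input_bytes + padding)[:output_len]


# "aGVsbG8" is 7 chars, so padding = 3 - ((7 + 3) % 4) = one '=' appended,
# and output_len = 3 * 2 + 1 - 2 = 5 bytes -- exactly b"hello".
assert decode_base64("aGVsbG8") == b"hello"
```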

def encode_canonical_json(value):
    return json.dumps(
        value,
@@ -88,42 +76,6 @@ def sign_json(
    return json_object


NACL_ED25519 = "ed25519"


def decode_signing_key_base64(algorithm, version, key_base64):
    """Decode a base64 encoded signing key
    Args:
        algorithm (str): The algorithm the key is for (currently "ed25519").
        version (str): Identifies this key out of the keys for this entity.
        key_base64 (str): Base64 encoded bytes of the key.
    Returns:
        A SigningKey object.
    """
    if algorithm == NACL_ED25519:
        key_bytes = decode_base64(key_base64)
        key = nacl.signing.SigningKey(key_bytes)
        key.version = version
        key.alg = NACL_ED25519
        return key
    else:
        raise ValueError("Unsupported algorithm %s" % (algorithm,))


def read_signing_keys(stream):
    """Reads a list of keys from a stream
    Args:
        stream : A stream to iterate for keys.
    Returns:
        list of SigningKey objects.
    """
    keys = []
    for line in stream:
        algorithm, version, key_base64 = line.split()
        keys.append(decode_signing_key_base64(algorithm, version, key_base64))
    return keys


def request(
    method: Optional[str],
    origin_name: str,
@@ -223,23 +175,28 @@ def main():
    parser.add_argument("--body", help="Data to send as the body of the HTTP request")

    parser.add_argument(
        "path", help="request path. We will add '/_matrix/federation/v1/' to this."
        "path", help="request path, including the '/_matrix/federation/...' prefix."
    )

    args = parser.parse_args()

    if not args.server_name or not args.signing_key_path:
    args.signing_key = None
    if args.signing_key_path:
        with open(args.signing_key_path) as f:
            args.signing_key = f.readline()

    if not args.server_name or not args.signing_key:
        read_args_from_config(args)

    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]
    algorithm, version, key_base64 = args.signing_key.split()
    key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64)

    result = request(
        args.method,
        args.server_name,
        key,
        args.destination,
        "/_matrix/federation/v1/" + args.path,
        args.path,
        content=args.body,
    )

@@ -255,10 +212,16 @@ def main():
def read_args_from_config(args):
    with open(args.config, "r") as fh:
        config = yaml.safe_load(fh)

        if not args.server_name:
            args.server_name = config["server_name"]
        if not args.signing_key_path:
            args.signing_key_path = config["signing_key_path"]

        if not args.signing_key:
            if "signing_key" in config:
                args.signing_key = config["signing_key"]
            else:
                with open(config["signing_key_path"]) as f:
                    args.signing_key = f.readline()


class MatrixConnectionAdapter(HTTPAdapter):

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Update/check the docs/sample_config.yaml

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Runs linting scripts over the local Synapse checkout
# isort - sorts import statements

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

set -e

@@ -6,4 +6,4 @@ set -e
# next PR number.
CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
CURRENT_NUMBER=$((CURRENT_NUMBER+1))
echo $CURRENT_NUMBER
echo $CURRENT_NUMBER

@@ -51,7 +51,7 @@ def main(src_repo, dest_repo):
        parts = line.split("|")
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            exit(1)
            sys.exit(1)

        move_media(parts[0], parts[1], src_paths, dest_paths)


@@ -47,6 +47,7 @@ from synapse.storage.databases.main.events_bg_updates import (
from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
    find_max_generated_user_id_localpart,
@@ -177,6 +178,7 @@ class Store(
    UserDirectoryBackgroundUpdateStore,
    EndToEndKeyBackgroundStore,
    StatsStore,
    PusherWorkerStore,
):
    def execute(self, f, *args, **kwargs):
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

@@ -3,6 +3,7 @@ test_suite = tests

[check-manifest]
ignore =
    .git-blame-ignore-revs
    contrib
    contrib/*
    docs/*
@@ -17,7 +18,8 @@ ignore =
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
ignore=W503,W504,E203,E731,E501
# B00*: Subsection of the bugbear suite (TODO: add in remaining fixes)
ignore=W503,W504,E203,E731,E501,B006,B007,B008

[isort]
line_length = 88

setup.py
@@ -99,10 +99,11 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
    "isort==5.7.0",
    "black==20.8b1",
    "flake8-comprehensions",
    "flake8-bugbear==21.3.2",
    "flake8",
]

CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"]
CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.

@@ -17,7 +17,9 @@
"""
from typing import Any, List, Optional, Type, Union

class RedisProtocol:
from twisted.internet import protocol

class RedisProtocol(protocol.Protocol):
    def publish(self, channel: str, message: bytes): ...
    async def ping(self) -> None: ...
    async def set(
@@ -52,7 +54,7 @@ def lazyConnection(

class ConnectionHandler: ...

class RedisFactory:
class RedisFactory(protocol.ReconnectingClientFactory):
    continueTrying: bool
    handler: RedisProtocol
    pool: List[RedisProtocol]

@@ -48,7 +48,7 @@ try:
except ImportError:
    pass

__version__ = "1.28.0"
__version__ = "1.31.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -39,6 +39,7 @@ from synapse.logging import opentracing as opentracing
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import StateMap, UserID
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)
@@ -163,7 +164,7 @@ class Auth:

    async def get_user_by_req(
        self,
        request: Request,
        request: SynapseRequest,
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
@@ -408,7 +409,7 @@ class Auth:
            raise _InvalidMacaroonException()

        try:
            user_id = self.get_user_id_from_macaroon(macaroon)
            user_id = get_value_from_macaroon(macaroon, "user_id")

            guest = False
            for caveat in macaroon.caveats:
@@ -416,7 +417,12 @@ class Auth:
                    guest = True

            self.validate_macaroon(macaroon, rights, user_id=user_id)
        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
        except (
            pymacaroons.exceptions.MacaroonException,
            KeyError,
            TypeError,
            ValueError,
        ):
            raise InvalidClientTokenError("Invalid macaroon passed.")

        if rights == "access":
@@ -424,27 +430,6 @@ class Auth:

        return user_id, guest

    def get_user_id_from_macaroon(self, macaroon):
        """Retrieve the user_id given by the caveats on the macaroon.

        Does *not* validate the macaroon.

        Args:
            macaroon (pymacaroons.Macaroon): The macaroon to validate

        Returns:
            (str) user id

        Raises:
            InvalidClientCredentialsError if there is no user_id caveat in the
                macaroon
        """
        user_prefix = "user_id = "
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith(user_prefix):
                return caveat.caveat_id[len(user_prefix) :]
        raise InvalidClientTokenError("No user caveat in macaroon")

    def validate_macaroon(self, macaroon, type_string, user_id):
        """
        validate that a Macaroon is understood by and was signed by this server.
@@ -465,21 +450,13 @@ class Auth:
        v.satisfy_exact("type = " + type_string)
        v.satisfy_exact("user_id = %s" % user_id)
        v.satisfy_exact("guest = true")
        v.satisfy_general(self._verify_expiry)
        satisfy_expiry(v, self.clock.time_msec)

        # access_tokens include a nonce for uniqueness: any value is acceptable
        v.satisfy_general(lambda c: c.startswith("nonce = "))

        v.verify(macaroon, self._macaroon_secret_key)

    def _verify_expiry(self, caveat):
        prefix = "time < "
        if not caveat.startswith(prefix):
            return False
        expiry = int(caveat[len(prefix) :])
        now = self.hs.get_clock().time_msec()
        return now < expiry

    def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService:
        token = self.get_access_token_from_request(request)
        service = self.store.get_app_service_by_token(token)
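As an illustrative aside (not part of the diff): `validate_macaroon` leans on pymacaroons' verifier, where each `satisfy_exact`/`satisfy_general` call whitelists one caveat form and `verify` then checks every caveat plus the HMAC signature. A self-contained sketch of that pattern, using only pymacaroons calls that appear in the diff; the secret and caveat values are invented:

```python
import pymacaroons

SECRET = "not-a-real-secret"  # hypothetical signing key

# Mint a macaroon carrying the caveat shapes seen above
m = pymacaroons.Macaroon(location="example.com", identifier="key1", key=SECRET)
m.add_first_party_caveat("type = access")
m.add_first_party_caveat("user_id = @alice:example.com")
m.add_first_party_caveat("nonce = abcdef")

v = pymacaroons.Verifier()
v.satisfy_exact("type = access")
v.satisfy_exact("user_id = @alice:example.com")
# general satisfier: accept any nonce caveat, mirroring the lambda in the diff
v.satisfy_general(lambda c: c.startswith("nonce = "))

# Raises MacaroonVerificationFailedException if a caveat or the signature fails
v.verify(m, SECRET)
```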
@@ -581,6 +558,9 @@ class Auth:
        Returns:
            bool: False if no access_token was given, True otherwise.
        """
        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        query_params = request.args.get(b"access_token")
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        return bool(query_params) or bool(auth_headers)
@@ -597,6 +577,8 @@ class Auth:
            MissingClientTokenError: If there isn't a single access_token in the
                request
        """
        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        query_params = request.args.get(b"access_token")

@@ -51,6 +51,7 @@ class PresenceState:
    OFFLINE = "offline"
    UNAVAILABLE = "unavailable"
    ONLINE = "online"
    BUSY = "org.matrix.msc3026.busy"


class JoinRules:
@@ -100,6 +101,9 @@ class EventTypes:

    Dummy = "org.matrix.dummy_event"

    MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child"
    MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent"


class EduTypes:
    Presence = "m.presence"
@@ -160,6 +164,9 @@ class EventContentFields:
    # cf https://github.com/matrix-org/matrix-doc/pull/2228
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"

    # cf https://github.com/matrix-org/matrix-doc/pull/1772
    MSC1772_ROOM_TYPE = "org.matrix.msc1772.type"


class RoomEncryptionAlgorithms:
    MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2"

@@ -17,14 +17,14 @@ import sys

from synapse import python_dependencies  # noqa: E402

sys.dont_write_bytecode = True

logger = logging.getLogger(__name__)

try:
    python_dependencies.check_requirements()
except python_dependencies.DependencyException as e:
    sys.stderr.writelines(e.message)
    sys.stderr.writelines(
        e.message  # noqa: B306, DependencyException.message is a property
    )
    sys.exit(1)


@@ -21,8 +21,10 @@ import signal
import socket
import sys
import traceback
import warnings
from typing import Awaitable, Callable, Iterable

from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import NoReturn

from twisted.internet import defer, error, reactor
@@ -195,6 +197,25 @@ def listen_metrics(bind_addresses, port):
        start_http_server(port, addr=host, registry=RegistryProxy)


def listen_manhole(bind_addresses: Iterable[str], port: int, manhole_globals: dict):
    # twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
    # warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so
    # suppress the warning for now.
    warnings.filterwarnings(
        action="ignore",
        category=CryptographyDeprecationWarning,
        message="int_from_bytes is deprecated",
    )

    from synapse.util.manhole import manhole

    listen_tcp(
        bind_addresses,
        port,
        manhole(username="matrix", password="rabbithole", globals=manhole_globals),
    )


def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    """
    Create a TCP socket for a port and several addresses

@@ -23,6 +23,7 @@ from typing_extensions import ContextManager

from twisted.internet import address
from twisted.web.resource import IResource
from twisted.web.server import Request

import synapse
import synapse.events
@@ -146,7 +147,6 @@ from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.generic_worker")
@@ -190,7 +190,7 @@ class KeyUploadServlet(RestServlet):
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    async def on_POST(self, request, device_id):
    async def on_POST(self, request: Request, device_id: Optional[str]):
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)
@@ -223,10 +223,12 @@ class KeyUploadServlet(RestServlet):
            header: request.requestHeaders.getRawHeaders(header, [])
            for header in (b"Authorization", b"User-Agent")
        }
        # Add the previous hop the the X-Forwarded-For header.
        # Add the previous hop to the X-Forwarded-For header.
        x_forwarded_for = request.requestHeaders.getRawHeaders(
            b"X-Forwarded-For", []
        )
        # we use request.client here, since we want the previous hop, not the
        # original client (as returned by request.getClientAddress()).
        if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
            previous_host = request.client.host.encode("ascii")
            # If the header exists, add to the comma-separated list of the first
@@ -239,6 +241,14 @@ class KeyUploadServlet(RestServlet):
                x_forwarded_for = [previous_host]
        headers[b"X-Forwarded-For"] = x_forwarded_for

        # Replicate the original X-Forwarded-Proto header. Note that
        # XForwardedForRequest overrides isSecure() to give us the original protocol
        # used by the client, as opposed to the protocol used by our upstream proxy
        # - which is what we want here.
        headers[b"X-Forwarded-Proto"] = [
            b"https" if request.isSecure() else b"http"
        ]

        try:
            result = await self.http_client.post_json_get_json(
                self.main_uri + request.uri.decode("ascii"), body, headers=headers
@@ -291,6 +301,8 @@ class GenericWorkerPresence(BasePresenceHandler):
            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
        )

        self._busy_presence_enabled = hs.config.experimental.msc3026_enabled

        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
@@ -428,8 +440,12 @@ class GenericWorkerPresence(BasePresenceHandler):
            PresenceState.ONLINE,
            PresenceState.UNAVAILABLE,
            PresenceState.OFFLINE,
            PresenceState.BUSY,
        )
        if presence not in valid_presence:

        if presence not in valid_presence or (
            presence == PresenceState.BUSY and not self._busy_presence_enabled
        ):
            raise SynapseError(400, "Invalid presence state")

        user_id = target_user.to_string()
@@ -623,12 +639,8 @@ class GenericWorkerServer(HomeServer):
            if listener.type == "http":
                self._listen_http(listener)
            elif listener.type == "manhole":
                _base.listen_tcp(
                    listener.bind_addresses,
                    listener.port,
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                _base.listen_manhole(
                    listener.bind_addresses, listener.port, manhole_globals={"hs": self}
                )
            elif listener.type == "metrics":
                if not self.get_config().enable_metrics:
@@ -775,13 +787,6 @@ class FederationSenderHandler:

        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def wake_destination(self, server: str):
        self.federation_sender.wake_destination(server)


@@ -67,7 +67,6 @@ from synapse.storage import DataStore
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.versionstring import get_version_string

@@ -288,12 +287,8 @@ class SynapseHomeServer(HomeServer):
            if listener.type == "http":
                self._listening_services.extend(self._listener_http(config, listener))
            elif listener.type == "manhole":
                listen_tcp(
                    listener.bind_addresses,
                    listener.port,
                    manhole(
                        username="matrix", password="rabbithole", globals={"hs": self}
                    ),
                _base.listen_manhole(
                    listener.bind_addresses, listener.port, manhole_globals={"hs": self}
                )
            elif listener.type == "replication":
                services = listen_tcp(

@@ -90,7 +90,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(
            hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS
            hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )  # type: ResponseCache[Tuple[str, str]]

    async def query_user(self, service, user_id):

@@ -212,9 +212,8 @@ class Config:

    @classmethod
    def read_file(cls, file_path, config_name):
        cls.check_file(file_path, config_name)
        with open(file_path) as file_stream:
            return file_stream.read()
        """Deprecated: call read_file directly"""
        return read_file(file_path, (config_name,))

    def read_template(self, filename: str) -> jinja2.Template:
        """Load a template file from disk.
@@ -894,4 +893,35 @@ class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
        return self._get_instance(key)


__all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"]
def read_file(file_path: Any, config_path: Iterable[str]) -> str:
    """Check the given file exists, and read it into a string

    If it does not, emit an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
            error can be emitted if it does not exist.
    Returns:
        content of the file.
    Raises:
        ConfigError if there is a problem reading the file.
    """
    if not isinstance(file_path, str):
        raise ConfigError("%r is not a string", config_path)

    try:
        os.stat(file_path)
        with open(file_path) as file_stream:
            return file_stream.read()
    except OSError as e:
        raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e


__all__ = [
    "Config",
    "RootConfig",
    "ShardedWorkerHandlingConfig",
    "RoutableShardedWorkerHandlingConfig",
    "read_file",
]
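As an illustrative aside (not part of the diff): the module-level `read_file` threads a `config_path` tuple through solely so that the resulting `ConfigError` can name the option that pointed at the unreadable file. A hedged usage sketch; the file path and option names are invented:

```python
from synapse.config._base import ConfigError, read_file  # per the diff above

try:
    # config_path records *where* in the YAML this file was referenced,
    # so the error message can point the admin at the right option
    pem = read_file(
        "/etc/synapse/appleid.p8",
        ("oidc_providers", "client_secret_jwt_key", "key_file"),
    )
except ConfigError as e:
    print("bad config:", e)
```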
@@ -152,3 +152,5 @@ class ShardedWorkerHandlingConfig:

class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
    def get_instance(self, key: str) -> str: ...

def read_file(file_path: Any, config_path: Iterable[str]) -> str: ...

@@ -24,7 +24,7 @@ from ._base import Config, ConfigError
_CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"

# Map from canonicalised cache name to cache.
_CACHES = {}
_CACHES = {}  # type: Dict[str, Callable[[float], None]]

# a lock on the contents of _CACHES
_CACHES_LOCK = threading.Lock()
@@ -59,7 +59,9 @@ def _canonicalise_cache_name(cache_name: str) -> str:
    return cache_name.lower()


def add_resizable_cache(cache_name: str, cache_resize_callback: Callable):
def add_resizable_cache(
    cache_name: str, cache_resize_callback: Callable[[float], None]
):
    """Register a cache that's size can dynamically change

    Args:

@@ -27,3 +27,7 @@ class ExperimentalConfig(Config):

        # MSC2858 (multiple SSO identity providers)
        self.msc2858_enabled = experimental.get("msc2858_enabled", False)  # type: bool
        # Spaces (MSC1772, MSC2946, etc)
        self.spaces_enabled = experimental.get("spaces_enabled", False)  # type: bool
        # MSC3026 (busy presence state)
        self.msc3026_enabled = experimental.get("msc3026_enabled", False)  # type: bool

@@ -404,7 +404,11 @@ def _parse_key_servers(key_servers, federation_verify_certificates):
    try:
        jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
    except jsonschema.ValidationError as e:
        raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)
        raise ConfigError(
            "Unable to parse 'trusted_key_servers': {}".format(
                e.message  # noqa: B306, jsonschema.ValidationError.message is a valid attribute
            )
        )

    for server in key_servers:
        server_name = server["server_name"]

@@ -21,8 +21,10 @@ import threading
from string import Template

import yaml
from zope.interface import implementer

from twisted.logger import (
    ILogObserver,
    LogBeginner,
    STDLibLogObserver,
    eventAsText,
@@ -227,7 +229,8 @@ def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) ->

    threadlocal = threading.local()

    def _log(event):
    @implementer(ILogObserver)
    def _log(event: dict) -> None:
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

@@ -56,7 +56,9 @@ class MetricsConfig(Config):
            try:
                check_requirements("sentry")
            except DependencyException as e:
                raise ConfigError(e.message)
                raise ConfigError(
                    e.message  # noqa: B306, DependencyException.message is a property
                )

            self.sentry_dsn = config["sentry"].get("dsn")
            if not self.sentry_dsn:

@@ -15,17 +15,18 @@
# limitations under the License.

from collections import Counter
from typing import Iterable, Optional, Tuple, Type
from typing import Iterable, List, Mapping, Optional, Tuple, Type

import attr

from synapse.config._util import validate_config
from synapse.config.sso import SsoAttributeRequirement
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.types import Collection, JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri

from ._base import Config, ConfigError
from ._base import Config, ConfigError, read_file

DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"

@@ -41,7 +42,9 @@ class OIDCConfig(Config):
        try:
            check_requirements("oidc")
        except DependencyException as e:
            raise ConfigError(e.message) from e
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            ) from e

        # check we don't have any duplicate idp_ids now. (The SSO handler will also
        # check for duplicates when the REST listeners get registered, but that happens
@@ -76,6 +79,9 @@ class OIDCConfig(Config):
#   Note that, if this is changed, users authenticating via that provider
#   will no longer be recognised as the same user!
#
#   (Use "oidc" here if you are migrating from an old "oidc_config"
#   configuration.)
#
# idp_name: A user-facing name for this identity provider, which is used to
#   offer the user a choice of login mechanisms.
#
@@ -97,7 +103,26 @@ class OIDCConfig(Config):
#
# client_id: Required. oauth2 client id to use.
#
# client_secret: Required. oauth2 client secret to use.
# client_secret: oauth2 client secret to use. May be omitted if
#   client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
# client_secret_jwt_key: Alternative to client_secret: details of a key used
#   to create a JSON Web Token to be used as an OAuth2 client secret. If
#   given, must be a dictionary with the following properties:
#
#     key: a pem-encoded signing key. Must be a suitable key for the
#       algorithm specified. Required unless 'key_file' is given.
#
#     key_file: the path to file containing a pem-encoded signing key file.
#       Required unless 'key' is given.
#
#     jwt_header: a dictionary giving properties to include in the JWT
#       header. Must include the key 'alg', giving the algorithm used to
#       sign the JWT, such as "ES256", using the JWA identifiers in
#       RFC7518.
#
#     jwt_payload: an optional dictionary giving properties to include in
#       the JWT payload. Normally this should include an 'iss' key.
#
# client_auth_method: auth method to use when exchanging the token. Valid
#   values are 'client_secret_basic' (default), 'client_secret_post' and
@@ -172,6 +197,24 @@ class OIDCConfig(Config):
#       which is set to the claims returned by the UserInfo Endpoint and/or
#       in the ID Token.
#
# It is possible to configure Synapse to only allow logins if certain attributes
# match particular values in the OIDC userinfo. The requirements can be listed under
# `attribute_requirements` as shown below. All of the listed attributes must
# match for the login to be permitted. Additional attributes can be added to
# userinfo by expanding the `scopes` section of the OIDC config to retrieve
# additional information from the OIDC provider.
#
# If the OIDC claim is a list, then the attribute must match any value in the list.
# Otherwise, it must exactly match the value of the claim. Using the example
# below, the `family_name` claim MUST be "Stephensson", but the `groups`
# claim MUST contain "admin".
#
# attribute_requirements:
#   - attribute: family_name
#     value: "Stephensson"
#   - attribute: groups
#     value: "admin"
#
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
# for information on how to configure these options.
#
@@ -204,34 +247,9 @@ class OIDCConfig(Config):
#     localpart_template: "{{{{ user.login }}}}"
#     display_name_template: "{{{{ user.name }}}}"
#     email_template: "{{{{ user.email }}}}"

# For use with Keycloak
#
#- idp_id: keycloak
#  idp_name: Keycloak
#  issuer: "https://127.0.0.1:8443/auth/realms/my_realm_name"
#  client_id: "synapse"
#  client_secret: "copy secret generated in Keycloak UI"
#  scopes: ["openid", "profile"]

# For use with Github
#
#- idp_id: github
#  idp_name: Github
#  idp_brand: org.matrix.github
#  discover: false
#  issuer: "https://github.com/"
#  client_id: "your-client-id" # TO BE FILLED
#  client_secret: "your-client-secret" # TO BE FILLED
#  authorization_endpoint: "https://github.com/login/oauth/authorize"
#  token_endpoint: "https://github.com/login/oauth/access_token"
#  userinfo_endpoint: "https://api.github.com/user"
#  scopes: ["read:user"]
#  user_mapping_provider:
#    config:
#      subject_claim: "id"
#      localpart_template: "{{{{ user.login }}}}"
#      display_name_template: "{{{{ user.name }}}}"
#  attribute_requirements:
#    - attribute: userGroup
#      value: "synapseUsers"
""".format(
    mapping_provider=DEFAULT_USER_MAPPING_PROVIDER
)
@@ -240,7 +258,7 @@ class OIDCConfig(Config):
# jsonschema definition of the configuration settings for an oidc identity provider
OIDC_PROVIDER_CONFIG_SCHEMA = {
    "type": "object",
    "required": ["issuer", "client_id", "client_secret"],
    "required": ["issuer", "client_id"],
    "properties": {
        "idp_id": {
            "type": "string",
@@ -253,7 +271,12 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "idp_icon": {"type": "string"},
        "idp_brand": {
            "type": "string",
            # MSC2758-style namespaced identifier
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
        },
        "idp_unstable_brand": {
            "type": "string",
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
@@ -262,6 +285,30 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        "issuer": {"type": "string"},
        "client_id": {"type": "string"},
        "client_secret": {"type": "string"},
        "client_secret_jwt_key": {
            "type": "object",
            "required": ["jwt_header"],
            "oneOf": [
                {"required": ["key"]},
                {"required": ["key_file"]},
            ],
            "properties": {
                "key": {"type": "string"},
                "key_file": {"type": "string"},
                "jwt_header": {
                    "type": "object",
                    "required": ["alg"],
                    "properties": {
                        "alg": {"type": "string"},
                    },
                    "additionalProperties": {"type": "string"},
                },
                "jwt_payload": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
        },
        "client_auth_method": {
            "type": "string",
            # the following list is the same as the keys of
@@ -281,6 +328,10 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
        },
        "allow_existing_users": {"type": "boolean"},
        "user_mapping_provider": {"type": ["object", "null"]},
        "attribute_requirements": {
            "type": "array",
            "items": SsoAttributeRequirement.JSON_SCHEMA,
        },
    },
}

@@ -404,15 +455,36 @@ def _parse_oidc_config_dict(
            "idp_icon must be a valid MXC URI", config_path + ("idp_icon",)
        ) from e

    client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
    client_secret_jwt_key = None  # type: Optional[OidcProviderClientSecretJwtKey]
    if client_secret_jwt_key_config is not None:
        keyfile = client_secret_jwt_key_config.get("key_file")
        if keyfile:
            key = read_file(keyfile, config_path + ("client_secret_jwt_key",))
        else:
            key = client_secret_jwt_key_config["key"]
        client_secret_jwt_key = OidcProviderClientSecretJwtKey(
            key=key,
            jwt_header=client_secret_jwt_key_config["jwt_header"],
            jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
        )
    # parse attribute_requirements from config (list of dicts) into a list of SsoAttributeRequirement
    attribute_requirements = [
        SsoAttributeRequirement(**x)
        for x in oidc_config.get("attribute_requirements", [])
    ]

    return OidcProviderConfig(
        idp_id=idp_id,
        idp_name=oidc_config.get("idp_name", "OIDC"),
        idp_icon=idp_icon,
        idp_brand=oidc_config.get("idp_brand"),
        unstable_idp_brand=oidc_config.get("unstable_idp_brand"),
        discover=oidc_config.get("discover", True),
        issuer=oidc_config["issuer"],
        client_id=oidc_config["client_id"],
        client_secret=oidc_config["client_secret"],
        client_secret=oidc_config.get("client_secret"),
        client_secret_jwt_key=client_secret_jwt_key,
        client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
        scopes=oidc_config.get("scopes", ["openid"]),
        authorization_endpoint=oidc_config.get("authorization_endpoint"),
@@ -424,9 +496,22 @@ def _parse_oidc_config_dict(
        allow_existing_users=oidc_config.get("allow_existing_users", False),
        user_mapping_provider_class=user_mapping_provider_class,
        user_mapping_provider_config=user_mapping_provider_config,
        attribute_requirements=attribute_requirements,
    )


@attr.s(slots=True, frozen=True)
class OidcProviderClientSecretJwtKey:
    # a pem-encoded signing key
    key = attr.ib(type=str)

    # properties to include in the JWT header
    jwt_header = attr.ib(type=Mapping[str, str])

    # properties to include in the JWT payload.
    jwt_payload = attr.ib(type=Mapping[str, str])


@attr.s(slots=True, frozen=True)
class OidcProviderConfig:
    # a unique identifier for this identity provider. Used in the 'user_external_ids'
@@ -442,6 +527,9 @@ class OidcProviderConfig:
    # Optional brand identifier for this IdP.
    idp_brand = attr.ib(type=Optional[str])

    # Optional brand identifier for the unstable API (see MSC2858).
    unstable_idp_brand = attr.ib(type=Optional[str])

    # whether the OIDC discovery mechanism is used to discover endpoints
    discover = attr.ib(type=bool)

@@ -452,8 +540,13 @@ class OidcProviderConfig:
    # oauth2 client id to use
    client_id = attr.ib(type=str)

    # oauth2 client secret to use
    client_secret = attr.ib(type=str)
    # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate
    # a secret.
    client_secret = attr.ib(type=Optional[str])

    # key to use to construct a JWT to use as a client secret. May be `None` if
    # `client_secret` is set.
    client_secret_jwt_key = attr.ib(type=Optional[OidcProviderClientSecretJwtKey])

    # auth method to use when exchanging the token.
    # Valid values are 'client_secret_basic', 'client_secret_post' and
@@ -493,3 +586,6 @@ class OidcProviderConfig:

    # the config of the user mapping provider
    user_mapping_provider_config = attr.ib()

    # required attributes to require in userinfo to allow login/registration
    attribute_requirements = attr.ib(type=List[SsoAttributeRequirement])

@@ -95,11 +95,11 @@ class RatelimitConfig(Config):

        self.rc_joins_local = RateLimitConfig(
            config.get("rc_joins", {}).get("local", {}),
            defaults={"per_second": 0.1, "burst_count": 3},
            defaults={"per_second": 0.1, "burst_count": 10},
        )
        self.rc_joins_remote = RateLimitConfig(
            config.get("rc_joins", {}).get("remote", {}),
            defaults={"per_second": 0.01, "burst_count": 3},
            defaults={"per_second": 0.01, "burst_count": 10},
        )

        # Ratelimit cross-user key requests:
@@ -187,10 +187,10 @@ class RatelimitConfig(Config):
#rc_joins:
#  local:
#    per_second: 0.1
#    burst_count: 3
#    burst_count: 10
#  remote:
#    per_second: 0.01
#    burst_count: 3
#    burst_count: 10
#
#rc_3pid_validation:
#  per_second: 0.003

@@ -176,7 +176,9 @@ class ContentRepositoryConfig(Config):
                check_requirements("url_preview")

            except DependencyException as e:
                raise ConfigError(e.message)
                raise ConfigError(
                    e.message  # noqa: B306, DependencyException.message is a property
                )

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(

@@ -76,7 +76,9 @@ class SAML2Config(Config):
        try:
            check_requirements("saml2")
        except DependencyException as e:
            raise ConfigError(e.message)
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            )

        self.saml2_enabled = True

@@ -841,8 +841,7 @@ class ServerConfig(Config):
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, so this setting is of limited value if federation is enabled on
# the server.
# API, unless allow_profile_lookup_over_federation is set to false.
#
#require_auth_for_profile_requests: true

@@ -13,10 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import logging

from ._base import Config

ROOM_STATS_DISABLED_WARN = """\
WARNING: room/user statistics have been disabled via the stats.enabled
configuration setting. This means that certain features (such as the room
directory) will not operate correctly. Future versions of Synapse may ignore
this setting.

To fix this warning, remove the stats.enabled setting from your configuration
file.
--------------------------------------------------------------------------------"""

logger = logging.getLogger(__name__)


class StatsConfig(Config):
    """Stats Configuration
@@ -28,30 +40,29 @@ class StatsConfig(Config):
    def read_config(self, config, **kwargs):
        self.stats_enabled = True
        self.stats_bucket_size = 86400 * 1000
        self.stats_retention = sys.maxsize
        stats_config = config.get("stats", None)
        if stats_config:
            self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
            self.stats_bucket_size = self.parse_duration(
                stats_config.get("bucket_size", "1d")
            )
            self.stats_retention = self.parse_duration(
                stats_config.get("retention", "%ds" % (sys.maxsize,))
            )
        if not self.stats_enabled:
            logger.warning(ROOM_STATS_DISABLED_WARN)

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """
# Local statistics collection. Used in populating the room directory.
# Settings for local room and user statistics collection. See
# docs/room_and_user_statistics.md.
#
# 'bucket_size' controls how large each statistics timeslice is. It can
# be defined in a human readable short form -- e.g. "1d", "1y".
#
# 'retention' controls how long historical statistics will be kept for.
# It can be defined in a human readable short form -- e.g. "1d", "1y".
#
#
#stats:
#  enabled: true
#  bucket_size: 1d
#  retention: 1y
stats:
  # Uncomment the following to disable room and user statistics. Note that doing
  # so may cause certain features (such as the room directory) not to work
  # correctly.
  #
  #enabled: false

  # The size of each timeslice in the room_stats_historical and
  # user_stats_historical tables, as a time period. Defaults to "1d".
  #
  #bucket_size: 1h
"""
|
||||
|
||||
@@ -39,7 +39,9 @@ class TracerConfig(Config):
|
||||
try:
|
||||
check_requirements("opentracing")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(e.message)
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
|
||||
# The tracer is enabled so sanitize the config
|
||||
|
||||
|
||||
@@ -191,7 +191,7 @@ def _context_info_cb(ssl_connection, where, ret):
        # ... we further assume that SSLClientConnectionCreator has set the
        # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
        tls_protocol._synapse_tls_verifier.verify_context_info_cb(ssl_connection, where)
-    except:  # noqa: E722, taken from the twisted implementation
+    except BaseException:  # taken from the twisted implementation
        logger.exception("Error during info_callback")
        f = Failure()
        tls_protocol.failVerification(f)
@@ -219,7 +219,7 @@ class SSLClientConnectionCreator:
        # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
        # tls_protocol so that the SSL context's info callback has something to
        # call to do the cert verification.
-        setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
+        tls_protocol._synapse_tls_verifier = self._verifier
        return connection
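The setattr change above is behaviour-preserving: setattr with a constant attribute name is exactly equivalent to direct assignment, and the direct form is clearer (and visible to type checkers). A trivial demonstration:

class TLSProtocolStub:
    """Stand-in object for the demonstration."""


proto = TLSProtocolStub()
setattr(proto, "_synapse_tls_verifier", "verifier")  # dynamic form
proto._synapse_tls_verifier = "verifier"  # equivalent direct form
assert getattr(proto, "_synapse_tls_verifier") == proto._synapse_tls_verifier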
@@ -57,7 +57,7 @@ from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
-    from synapse.app.homeserver import HomeServer
+    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -98,7 +98,7 @@ class DefaultDictProperty(DictProperty):


class _EventInternalMetadata:
-    __slots__ = ["_dict", "stream_ordering"]
+    __slots__ = ["_dict", "stream_ordering", "outlier"]

    def __init__(self, internal_metadata_dict: JsonDict):
        # we have to copy the dict, because it turns out that the same dict is
@@ -108,7 +108,10 @@ class _EventInternalMetadata:
        # the stream ordering of this event. None, until it has been persisted.
        self.stream_ordering = None  # type: Optional[int]

-    outlier = DictProperty("outlier")  # type: bool
+        # whether this event is an outlier (ie, whether we have the state at that point
+        # in the DAG)
+        self.outlier = False
+
    out_of_band_membership = DictProperty("out_of_band_membership")  # type: bool
    send_on_behalf_of = DictProperty("send_on_behalf_of")  # type: str
    recheck_redaction = DictProperty("recheck_redaction")  # type: bool
@@ -129,7 +132,7 @@ class _EventInternalMetadata:
        return dict(self._dict)

    def is_outlier(self) -> bool:
-        return self._dict.get("outlier", False)
+        return self.outlier

    def is_out_of_band_membership(self) -> bool:
        """Whether this is an out of band membership, like an invite or an invite
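For context, DictProperty is a descriptor that proxies attribute access through to the instance's _dict; the hunks above turn outlier from such a proxied key into a plain instance attribute (with a matching __slots__ entry). A minimal sketch of the descriptor idea, not Synapse's exact class:

class DictProperty:
    """Sketch: expose a key of the owner's `_dict` as an attribute."""

    def __init__(self, key: str):
        self.key = key

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        return instance._dict[self.key]

    def __set__(self, instance, value):
        instance._dict[self.key] = value


class Metadata:
    send_on_behalf_of = DictProperty("send_on_behalf_of")

    def __init__(self, d: dict):
        self._dict = dict(d)


m = Metadata({"send_on_behalf_of": "@user:example.org"})
assert m.send_on_behalf_of == "@user:example.org"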
@@ -15,6 +15,7 @@
# limitations under the License.

+import inspect
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from synapse.rest.media.v1._base import FileInfo
@@ -27,6 +28,8 @@ if TYPE_CHECKING:
    import synapse.events
    import synapse.server

+logger = logging.getLogger(__name__)
+

class SpamChecker:
    def __init__(self, hs: "synapse.server.HomeServer"):
@@ -190,6 +193,7 @@ class SpamChecker:
        email_threepid: Optional[dict],
        username: Optional[str],
        request_info: Collection[Tuple[str, str]],
+        auth_provider_id: Optional[str] = None,
    ) -> RegistrationBehaviour:
        """Checks if we should allow the given registration request.
@@ -198,6 +202,9 @@ class SpamChecker:
            username: The request user name, if any
            request_info: List of tuples of user agent and IP that
                were used during the registration process.
+            auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml",
+                "cas". If any. Note this does not include users registered
+                via a password provider.

        Returns:
            Enum for how the request should be handled
@@ -208,9 +215,25 @@ class SpamChecker:
        # spam checker
        checker = getattr(spam_checker, "check_registration_for_spam", None)
        if checker:
-            behaviour = await maybe_awaitable(
-                checker(email_threepid, username, request_info)
-            )
+            # Provide auth_provider_id if the function supports it
+            checker_args = inspect.signature(checker)
+            if len(checker_args.parameters) == 4:
+                d = checker(
+                    email_threepid,
+                    username,
+                    request_info,
+                    auth_provider_id,
+                )
+            elif len(checker_args.parameters) == 3:
+                d = checker(email_threepid, username, request_info)
+            else:
+                logger.error(
+                    "Invalid signature for %s.check_registration_for_spam. Denying registration",
+                    spam_checker.__module__,
+                )
+                return RegistrationBehaviour.DENY
+
+            behaviour = await maybe_awaitable(d)
            assert isinstance(behaviour, RegistrationBehaviour)
            if behaviour != RegistrationBehaviour.ALLOW:
                return behaviour
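The inspect.signature check above is a common backwards-compatibility trick: pass the new auth_provider_id argument only to callbacks whose signature accepts it, so older spam-checker modules keep working unchanged. A stripped-down sketch of the idea, with hypothetical callbacks:

import inspect


def legacy_checker(email_threepid, username, request_info):
    return "ALLOW"


def new_checker(email_threepid, username, request_info, auth_provider_id):
    return "DENY" if auth_provider_id == "bad-idp" else "ALLOW"


def call_checker(checker, email_threepid, username, request_info, auth_provider_id=None):
    n = len(inspect.signature(checker).parameters)
    if n == 4:
        return checker(email_threepid, username, request_info, auth_provider_id)
    if n == 3:
        return checker(email_threepid, username, request_info)
    raise TypeError("unexpected signature: %r" % checker)


assert call_checker(legacy_checker, None, "alice", []) == "ALLOW"
assert call_checker(new_checker, None, "alice", [], "oidc") == "ALLOW"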
@@ -13,12 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-from typing import Callable, Union
+from typing import TYPE_CHECKING, Union

from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.types import Requester, StateMap

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+

class ThirdPartyEventRules:
    """Allows server admins to provide a Python module implementing an extra
@@ -28,7 +31,7 @@ class ThirdPartyEventRules:
    behaviours.
    """

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
        self.third_party_rules = None

        self.store = hs.get_datastore()
@@ -95,10 +98,9 @@ class ThirdPartyEventRules:
        if self.third_party_rules is None:
            return True

-        ret = await self.third_party_rules.on_create_room(
+        return await self.third_party_rules.on_create_room(
            requester, config, is_requester_admin
        )
-        return ret

    async def check_threepid_can_be_invited(
        self, medium: str, address: str, room_id: str
@@ -119,10 +121,9 @@ class ThirdPartyEventRules:

        state_events = await self._get_state_map_for_room(room_id)

-        ret = await self.third_party_rules.check_threepid_can_be_invited(
+        return await self.third_party_rules.check_threepid_can_be_invited(
            medium, address, state_events
        )
-        return ret

    async def check_visibility_can_be_modified(
        self, room_id: str, new_visibility: str
@@ -143,7 +144,7 @@ class ThirdPartyEventRules:
        check_func = getattr(
            self.third_party_rules, "check_visibility_can_be_modified", None
        )
-        if not check_func or not isinstance(check_func, Callable):
+        if not check_func or not callable(check_func):
            return True

        state_events = await self._get_state_map_for_room(room_id)
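Note on the last hunk: isinstance(check_func, Callable) happens to work at runtime, but typing.Callable is meant for annotations; the built-in callable() is the idiomatic runtime test, which is what the code switches to:

def check_func():
    pass


assert callable(check_func)  # the idiomatic runtime check
assert not callable(42)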
@@ -22,6 +22,7 @@ from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.util.async_helpers import yieldable_gather_results
+from synapse.util.frozenutils import unfreeze

from . import EventBase
@@ -54,6 +55,8 @@ def prune_event(event: EventBase) -> EventBase:
        event.internal_metadata.stream_ordering
    )

+    pruned_event.internal_metadata.outlier = event.internal_metadata.outlier
+
    # Mark the event as redacted
    pruned_event.internal_metadata.redacted = True
@@ -400,10 +403,19 @@ class EventClientSerializer:
            # If there is an edit replace the content, preserving existing
            # relations.

+            # Ensure we take copies of the edit content, otherwise we risk modifying
+            # the original event.
+            edit_content = edit.content.copy()
+
+            # Unfreeze the event content if necessary, so that we may modify it below
+            edit_content = unfreeze(edit_content)
+            serialized_event["content"] = edit_content.get("m.new_content", {})
+
            # Check for existing relations
            relations = event.content.get("m.relates_to")
-            serialized_event["content"] = edit.content.get("m.new_content", {})
            if relations:
-                serialized_event["content"]["m.relates_to"] = relations
+                # Keep the relations, ensuring we use a dict copy of the original
+                serialized_event["content"]["m.relates_to"] = relations.copy()
            else:
                serialized_event["content"].pop("m.relates_to", None)
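The copies matter because the serialized content would otherwise alias dicts belonging to the (possibly cached) original events, so a later mutation would corrupt them. A minimal illustration of the bug the hunk fixes, with plain dicts standing in for event content:

edit_content = {"m.new_content": {"body": "edited"}}
relations = {"rel_type": "m.replace"}

# Buggy shape: the serialized content *is* the original inner dict.
serialized = {"content": edit_content.get("m.new_content", {})}
serialized["content"]["m.relates_to"] = relations
assert "m.relates_to" in edit_content["m.new_content"]  # original corrupted

# Fixed shape: copy before mutating.
edit_content = {"m.new_content": {"body": "edited"}}
serialized = {"content": dict(edit_content.get("m.new_content", {}))}
serialized["content"]["m.relates_to"] = dict(relations)
assert "m.relates_to" not in edit_content["m.new_content"]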
@@ -27,11 +27,13 @@ from typing import (
    List,
    Mapping,
    Optional,
+    Sequence,
    Tuple,
    TypeVar,
    Union,
)

+import attr
from prometheus_client import Counter

from twisted.internet import defer
@@ -62,7 +64,7 @@ from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
-    from synapse.app.homeserver import HomeServer
+    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -455,6 +457,7 @@ class FederationClient(FederationBase):
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
+        failover_on_unknown_endpoint: bool = False,
    ) -> T:
        """Try an operation on a series of servers, until it succeeds
@@ -474,6 +477,10 @@ class FederationClient(FederationBase):
            next server tried. Normally the stacktrace is logged but this is
            suppressed if the exception is an InvalidResponseError.

+        failover_on_unknown_endpoint: if True, we will try other servers if it looks
+            like a server doesn't support the endpoint. This is typically useful
+            if the endpoint in question is new or experimental.
+
        Returns:
            The result of callback, if it succeeds
@@ -493,16 +500,31 @@ class FederationClient(FederationBase):
            except UnsupportedRoomVersionError:
                raise
            except HttpResponseException as e:
-                if not 500 <= e.code < 600:
-                    raise e.to_synapse_error()
-                else:
-                    logger.warning(
-                        "Failed to %s via %s: %i %s",
-                        description,
-                        destination,
-                        e.code,
-                        e.args[0],
-                    )
+                synapse_error = e.to_synapse_error()
+                failover = False
+
+                if 500 <= e.code < 600:
+                    failover = True
+
+                elif failover_on_unknown_endpoint:
+                    # there is no good way to detect an "unknown" endpoint. Dendrite
+                    # returns a 404 (with no body); synapse returns a 400
+                    # with M_UNRECOGNISED.
+                    if e.code == 404 or (
+                        e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
+                    ):
+                        failover = True
+
+                if not failover:
+                    raise synapse_error from e
+
+                logger.warning(
+                    "Failed to %s via %s: %i %s",
+                    description,
+                    destination,
+                    e.code,
+                    e.args[0],
+                )
            except Exception:
                logger.warning(
                    "Failed to %s via %s", description, destination, exc_info=True
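Taken together, these hunks make _try_destination_list treat "this server doesn't know the endpoint" like a retryable failure when asked to. A schematic sketch of that control flow (simplified error model, hypothetical helper names):

class HttpResponseException(Exception):
    """Stub for the sketch: carries an HTTP code and a Matrix errcode."""

    def __init__(self, code: int, errcode: str = ""):
        self.code = code
        self.errcode = errcode


async def try_destinations(destinations, send_request, failover_on_unknown_endpoint=False):
    """Try send_request against each destination until one succeeds."""
    for destination in destinations:
        try:
            return await send_request(destination)
        except HttpResponseException as e:
            failover = 500 <= e.code < 600
            if not failover and failover_on_unknown_endpoint:
                # A 404 (Dendrite) or 400/M_UNRECOGNIZED (Synapse) suggests the
                # remote simply doesn't implement the endpoint yet.
                failover = e.code == 404 or (e.code == 400 and e.errcode == "M_UNRECOGNIZED")
            if not failover:
                raise
    raise RuntimeError("all destinations failed")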
@@ -1042,3 +1064,141 @@ class FederationClient(FederationBase):
        # If we don't manage to find it, return None. It's not an error if a
        # server doesn't give it to us.
        return None
+
+    async def get_space_summary(
+        self,
+        destinations: Iterable[str],
+        room_id: str,
+        suggested_only: bool,
+        max_rooms_per_space: Optional[int],
+        exclude_rooms: List[str],
+    ) -> "FederationSpaceSummaryResult":
+        """
+        Call other servers to get a summary of the given space
+
+        Args:
+            destinations: The remote servers. We will try them in turn, omitting any
+                that have been blacklisted.
+
+            room_id: ID of the space to be queried
+
+            suggested_only: If true, ask the remote server to only return children
+                with the "suggested" flag set
+
+            max_rooms_per_space: A limit on the number of children to return for each
+                space
+
+            exclude_rooms: A list of room IDs to tell the remote server to skip
+
+        Returns:
+            a parsed FederationSpaceSummaryResult
+
+        Raises:
+            SynapseError if we were unable to get a valid summary from any of the
+                remote servers
+        """
+
+        async def send_request(destination: str) -> FederationSpaceSummaryResult:
+            res = await self.transport_layer.get_space_summary(
+                destination=destination,
+                room_id=room_id,
+                suggested_only=suggested_only,
+                max_rooms_per_space=max_rooms_per_space,
+                exclude_rooms=exclude_rooms,
+            )
+
+            try:
+                return FederationSpaceSummaryResult.from_json_dict(res)
+            except ValueError as e:
+                raise InvalidResponseError(str(e))
+
+        return await self._try_destination_list(
+            "fetch space summary",
+            destinations,
+            send_request,
+            failover_on_unknown_endpoint=True,
+        )
+
+
+@attr.s(frozen=True, slots=True)
+class FederationSpaceSummaryEventResult:
+    """Represents a single event in the result of a successful get_space_summary call.
+
+    It's essentially just a serialised event object, but we do a bit of parsing and
+    validation in `from_json_dict` and store some of the validated properties in
+    object attributes.
+    """
+
+    event_type = attr.ib(type=str)
+    state_key = attr.ib(type=str)
+    via = attr.ib(type=Sequence[str])
+
+    # the raw data, including the above keys
+    data = attr.ib(type=JsonDict)
+
+    @classmethod
+    def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryEventResult":
+        """Parse an event within the result of a /spaces/ request
+
+        Args:
+            d: json object to be parsed
+
+        Raises:
+            ValueError if d is not a valid event
+        """
+
+        event_type = d.get("type")
+        if not isinstance(event_type, str):
+            raise ValueError("Invalid event: 'event_type' must be a str")
+
+        state_key = d.get("state_key")
+        if not isinstance(state_key, str):
+            raise ValueError("Invalid event: 'state_key' must be a str")
+
+        content = d.get("content")
+        if not isinstance(content, dict):
+            raise ValueError("Invalid event: 'content' must be a dict")
+
+        via = content.get("via")
+        if not isinstance(via, Sequence):
+            raise ValueError("Invalid event: 'via' must be a list")
+        if any(not isinstance(v, str) for v in via):
+            raise ValueError("Invalid event: 'via' must be a list of strings")
+
+        return cls(event_type, state_key, via, d)
+
+
+@attr.s(frozen=True, slots=True)
+class FederationSpaceSummaryResult:
+    """Represents the data returned by a successful get_space_summary call."""
+
+    rooms = attr.ib(type=Sequence[JsonDict])
+    events = attr.ib(type=Sequence[FederationSpaceSummaryEventResult])
+
+    @classmethod
+    def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryResult":
+        """Parse the result of a /spaces/ request
+
+        Args:
+            d: json object to be parsed
+
+        Raises:
+            ValueError if d is not a valid /spaces/ response
+        """
+        rooms = d.get("rooms")
+        if not isinstance(rooms, Sequence):
+            raise ValueError("'rooms' must be a list")
+        if any(not isinstance(r, dict) for r in rooms):
+            raise ValueError("Invalid room in 'rooms' list")
+
+        events = d.get("events")
+        if not isinstance(events, Sequence):
+            raise ValueError("'events' must be a list")
+        if any(not isinstance(e, dict) for e in events):
+            raise ValueError("Invalid event in 'events' list")
+        parsed_events = [
+            FederationSpaceSummaryEventResult.from_json_dict(e) for e in events
+        ]
+
+        return cls(rooms, parsed_events)
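As a usage sketch, parsing a made-up /spaces/ response with the two attrs classes above (assuming they are in scope):

response = {
    "rooms": [{"room_id": "!space:example.org", "name": "Example space"}],
    "events": [
        {
            "type": "m.space.child",
            "state_key": "!child:example.org",
            "content": {"via": ["example.org"]},
        }
    ],
}

summary = FederationSpaceSummaryResult.from_json_dict(response)
assert summary.rooms[0]["room_id"] == "!space:example.org"
assert summary.events[0].via == ["example.org"]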
@@ -22,6 +22,7 @@ from typing import (
    Awaitable,
    Callable,
    Dict,
+    Iterable,
    List,
    Optional,
    Tuple,
@@ -34,7 +35,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

-from synapse.api.constants import EduTypes, EventTypes, Membership
+from synapse.api.constants import EduTypes, EventTypes
from synapse.api.errors import (
    AuthError,
    Codes,
@@ -62,7 +63,7 @@ from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
-from synapse.types import JsonDict, get_domain_from_id
+from synapse.types import JsonDict
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
@@ -90,16 +91,15 @@ pdu_process_time = Histogram(
    "Time taken to process an event",
)


-last_pdu_age_metric = Gauge(
-    "synapse_federation_last_received_pdu_age",
-    "The age (in seconds) of the last PDU successfully received from the given domain",
+last_pdu_ts_metric = Gauge(
+    "synapse_federation_last_received_pdu_time",
+    "The timestamp of the last PDU which was successfully received from the given domain",
    labelnames=("server_name",),
)


class FederationServer(FederationBase):
-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.auth = hs.get_auth()
@@ -112,14 +112,15 @@ class FederationServer(FederationBase):
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        self._server_linearizer = Linearizer("fed_server")
-        self._transaction_linearizer = Linearizer("fed_txn_handler")
+
+        # origins that we are currently processing a transaction from.
+        # a dict from origin to txn id.
+        self._active_transactions = {}  # type: Dict[str, str]

        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
-            hs, "fed_txn_handler", timeout_ms=30000
+            hs.get_clock(), "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)
@@ -129,10 +130,10 @@ class FederationServer(FederationBase):
        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
-            hs, "state_resp", timeout_ms=30000
+            hs.get_clock(), "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
-            hs, "state_ids_resp", timeout_ms=30000
+            hs.get_clock(), "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self._federation_metrics_domains = (
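These ResponseCache instances de-duplicate concurrent and recently-retried requests, keyed by tuples such as (origin, transaction id); note the constructor now takes a clock rather than the whole homeserver. A toy sketch of the de-duplication idea (the real class also keeps finished results around for timeout_ms before expiring them):

import asyncio
from typing import Any, Awaitable, Callable, Dict, Tuple


class MiniResponseCache:
    """Toy ResponseCache: concurrent calls with the same key share one result.

    Sketch only: entries here live for the duration of the task, with no
    timed expiry.
    """

    def __init__(self) -> None:
        self._pending: Dict[Tuple, asyncio.Task] = {}

    async def wrap(self, key: Tuple, callback: Callable[[], Awaitable[Any]]) -> Any:
        task = self._pending.get(key)
        if task is None:
            task = asyncio.ensure_future(callback())
            self._pending[key] = task
        return await task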
@@ -169,6 +170,33 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Got transaction", transaction_id)

+        # Reject malformed transactions early: reject if too many PDUs/EDUs
+        if len(transaction.pdus) > 50 or (  # type: ignore
+            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
+        ):
+            logger.info("Transaction PDU or EDU count too large. Returning 400")
+            return 400, {}
+
+        # we only process one transaction from each origin at a time. We need to do
+        # this check here, rather than in _on_incoming_transaction_inner so that we
+        # don't cache the rejection in _transaction_resp_cache (so that if the txn
+        # arrives again later, we can process it).
+        current_transaction = self._active_transactions.get(origin)
+        if current_transaction and current_transaction != transaction_id:
+            logger.warning(
+                "Received another txn %s from %s while still processing %s",
+                transaction_id,
+                origin,
+                current_transaction,
+            )
+            return 429, {
+                "errcode": Codes.UNKNOWN,
+                "error": "Too many concurrent transactions",
+            }
+
+        # CRITICAL SECTION: we must now not await until we populate _active_transactions
+        # in _on_incoming_transaction_inner.
+
        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
        return await self._transaction_resp_cache.wrap(
@@ -182,26 +210,18 @@ class FederationServer(FederationBase):
    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
-        # Use a linearizer to ensure that transactions from a remote are
-        # processed in order.
-        with await self._transaction_linearizer.queue(origin):
-            # We rate limit here *after* we've queued up the incoming requests,
-            # so that we don't fill up the ratelimiter with blocked requests.
-            #
-            # This is important as the ratelimiter allows N concurrent requests
-            # at a time, and only starts ratelimiting if there are more requests
-            # than that being processed at a time. If we queued up requests in
-            # the linearizer/response cache *after* the ratelimiting then those
-            # queued up requests would count as part of the allowed limit of N
-            # concurrent requests.
-            with self._federation_ratelimiter.ratelimit(origin) as d:
-                await d
-
-                result = await self._handle_incoming_transaction(
-                    origin, transaction, request_time
-                )
-
-                return result
+        # CRITICAL SECTION: the first thing we must do (before awaiting) is
+        # add an entry to _active_transactions.
+        assert origin not in self._active_transactions
+        self._active_transactions[origin] = transaction.transaction_id  # type: ignore
+
+        try:
+            result = await self._handle_incoming_transaction(
+                origin, transaction, request_time
+            )
+            return result
+        finally:
+            del self._active_transactions[origin]

    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
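The pattern in this hunk is: claim the per-origin slot synchronously (no await between the check and the insert, so it is atomic on the single-threaded reactor), then guarantee release with try/finally. A toy asyncio version of that bookkeeping:

import asyncio

active: dict = {}  # origin -> transaction id


async def handle(origin: str, txn_id: str) -> str:
    current = active.get(origin)
    if current and current != txn_id:
        return "429 (still processing %s)" % current
    active[origin] = txn_id  # claimed before the first await
    try:
        await asyncio.sleep(0)  # stand-in for the real processing
        return "200"
    finally:
        del active[origin]


async def main() -> None:
    # One transaction succeeds; the concurrent one from the same origin is rejected.
    print(await asyncio.gather(handle("hs1", "txn1"), handle("hs1", "txn2")))


asyncio.run(main())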
@@ -227,19 +247,6 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore

-        # Reject if PDU count > 50 or EDU count > 100
-        if len(transaction.pdus) > 50 or (  # type: ignore
-            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
-        ):
-
-            logger.info("Transaction PDU or EDU count too large. Returning 400")
-
-            response = {}
-            await self.transaction_actions.set_response(
-                origin, transaction, 400, response
-            )
-            return 400, response
-
        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
@@ -335,42 +342,48 @@ class FederationServer(FederationBase):
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
-            logger.debug("Processing PDUs for %s", room_id)
-            try:
-                await self.check_server_matches_acl(origin_host, room_id)
-            except AuthError as e:
-                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
-                for pdu in pdus_by_room[room_id]:
-                    event_id = pdu.event_id
-                    pdu_results[event_id] = e.error_dict()
-                return
-
-            for pdu in pdus_by_room[room_id]:
-                event_id = pdu.event_id
-                with pdu_process_time.time():
-                    with nested_logging_context(event_id):
-                        try:
-                            await self._handle_received_pdu(origin, pdu)
-                            pdu_results[event_id] = {}
-                        except FederationError as e:
-                            logger.warning("Error handling PDU %s: %s", event_id, e)
-                            pdu_results[event_id] = {"error": str(e)}
-                        except Exception as e:
-                            f = failure.Failure()
-                            pdu_results[event_id] = {"error": str(e)}
-                            logger.error(
-                                "Failed to handle PDU %s",
-                                event_id,
-                                exc_info=(f.type, f.value, f.getTracebackObject()),
-                            )
+            with nested_logging_context(room_id):
+                logger.debug("Processing PDUs for %s", room_id)
+
+                try:
+                    await self.check_server_matches_acl(origin_host, room_id)
+                except AuthError as e:
+                    logger.warning(
+                        "Ignoring PDUs for room %s from banned server", room_id
+                    )
+                    for pdu in pdus_by_room[room_id]:
+                        event_id = pdu.event_id
+                        pdu_results[event_id] = e.error_dict()
+                    return
+
+                for pdu in pdus_by_room[room_id]:
+                    pdu_results[pdu.event_id] = await process_pdu(pdu)
+
+        async def process_pdu(pdu: EventBase) -> JsonDict:
+            event_id = pdu.event_id
+            with pdu_process_time.time():
+                with nested_logging_context(event_id):
+                    try:
+                        await self._handle_received_pdu(origin, pdu)
+                        return {}
+                    except FederationError as e:
+                        logger.warning("Error handling PDU %s: %s", event_id, e)
+                        return {"error": str(e)}
+                    except Exception as e:
+                        f = failure.Failure()
+                        logger.error(
+                            "Failed to handle PDU %s",
+                            event_id,
+                            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
+                        )
+                        return {"error": str(e)}

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if newest_pdu_ts and origin in self._federation_metrics_domains:
-            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
-            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)
+            last_pdu_ts_metric.labels(server_name=origin).set(newest_pdu_ts / 1000)

        return pdu_results
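concurrently_execute runs process_pdus_for_room over all rooms with at most TRANSACTION_CONCURRENCY_LIMIT coroutines in flight. A rough asyncio analogue of that helper (Synapse's own implementation differs in detail):

import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")


async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    """Run func over args, with at most `limit` invocations in flight."""
    sem = asyncio.Semaphore(limit)

    async def run_one(arg: T) -> None:
        async with sem:
            await func(arg)

    await asyncio.gather(*(run_one(a) for a in args))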
@@ -448,18 +461,22 @@ class FederationServer(FederationBase):

    async def _on_state_ids_request_compute(self, room_id, event_id):
        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
-        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
+        auth_chain_ids = await self.store.get_auth_chain_ids(room_id, state_ids)
        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}

    async def _on_context_state_request_compute(
        self, room_id: str, event_id: str
    ) -> Dict[str, list]:
        if event_id:
-            pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+            pdus = await self.handler.get_state_for_pdu(
+                room_id, event_id
+            )  # type: Iterable[EventBase]
        else:
            pdus = (await self.state.get_current_state(room_id)).values()

-        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
+        auth_chain = await self.store.get_auth_chain(
+            room_id, [pdu.event_id for pdu in pdus]
+        )

        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
@@ -710,27 +727,6 @@ class FederationServer(FederationBase):
        if the event was unacceptable for any other reason (eg, too large,
        too many prev_events, couldn't find the prev_events)
        """
-        # check that it's actually being sent from a valid destination to
-        # workaround bug #1753 in 0.18.5 and 0.18.6
-        if origin != get_domain_from_id(pdu.sender):
-            # We continue to accept join events from any server; this is
-            # necessary for the federation join dance to work correctly.
-            # (When we join over federation, the "helper" server is
-            # responsible for sending out the join event, rather than the
-            # origin. See bug #1893. This is also true for some third party
-            # invites).
-            if not (
-                pdu.type == "m.room.member"
-                and pdu.content
-                and pdu.content.get("membership", None)
-                in (Membership.JOIN, Membership.INVITE)
-            ):
-                logger.info(
-                    "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
-                )
-                return
-            else:
-                logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)
-
        # We've already checked that we know the room version by this point
        room_version = await self.store.get_room_version(pdu.room_id)
@@ -863,7 +859,9 @@ class FederationHandlerRegistry:
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
-        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]
+        self.query_handlers = (
+            {}
+        )  # type: Dict[str, Callable[[dict], Awaitable[JsonDict]]]

        # Map from type to instance names that we should route EDU handling to.
        # We randomly choose one instance from the list to route to for each new
@@ -897,7 +895,7 @@ class FederationHandlerRegistry:
        self.edu_handlers[edu_type] = handler

    def register_query_handler(
-        self, query_type: str, handler: Callable[[dict], defer.Deferred]
+        self, query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.
@@ -936,10 +934,6 @@ class FederationHandlerRegistry:
        ):
            return

-        # Temporary patch to drop cross-user key share requests
-        if edu_type == "m.room_key_request":
-            return
-
        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
@@ -974,7 +968,7 @@ class FederationHandlerRegistry:
        # Oh well, let's just log and move on.
        logger.warning("No handler registered for EDU type %s", edu_type)

-    async def on_query(self, query_type: str, args: dict):
+    async def on_query(self, query_type: str, args: dict) -> JsonDict:
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)
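With the tightened types, a registered query handler is simply an async callable from the query arguments to a JSON-serialisable dict. A hypothetical handler, for illustration only (the query type and fields here are assumptions):

from typing import Any, Dict

JsonDict = Dict[str, Any]


async def on_profile_query(args: dict) -> JsonDict:
    # Hypothetical: answer a federation profile query from local data.
    return {"user_id": args.get("user_id"), "displayname": "Alice"}

# registry.register_query_handler("profile", on_profile_query)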
@@ -31,25 +31,39 @@ Events are replicated via a separate events stream.

import logging
from collections import namedtuple
-from typing import Dict, List, Tuple, Type
+from typing import (
+    TYPE_CHECKING,
+    Dict,
+    Hashable,
+    Iterable,
+    List,
+    Optional,
+    Sized,
+    Tuple,
+    Type,
+)

from sortedcontainers import SortedDict

from twisted.internet import defer

from synapse.api.presence import UserPresenceState
from synapse.federation.sender import AbstractFederationSender, FederationSender
from synapse.metrics import LaterGauge
from synapse.replication.tcp.streams.federation import FederationStream
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util.metrics import Measure

from .units import Edu

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)


-class FederationRemoteSendQueue:
+class FederationRemoteSendQueue(AbstractFederationSender):
    """A drop in replacement for FederationSender"""

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
@@ -58,7 +72,7 @@ class FederationRemoteSendQueue:
        # We may have multiple federation sender instances, so we need to track
        # their positions separately.
        self._sender_instances = hs.config.worker.federation_shard_config.instances
-        self._sender_positions = {}
+        self._sender_positions = {}  # type: Dict[str, int]

        # Pending presence map user_id -> UserPresenceState
        self.presence_map = {}  # type: Dict[str, UserPresenceState]
@@ -71,7 +85,7 @@ class FederationRemoteSendQueue:
        # Stream position -> (user_id, destinations)
        self.presence_destinations = (
            SortedDict()
-        )  # type: SortedDict[int, Tuple[str, List[str]]]
+        )  # type: SortedDict[int, Tuple[str, Iterable[str]]]

        # (destination, key) -> EDU
        self.keyed_edu = {}  # type: Dict[Tuple[str, tuple], Edu]
@@ -94,7 +108,7 @@ class FederationRemoteSendQueue:
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
-        def register(name, queue):
+        def register(name: str, queue: Sized) -> None:
            LaterGauge(
                "synapse_federation_send_queue_%s_size" % (queue_name,),
                "",
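The "ARGH" comment is about Python's late-binding closures: every lambda created in a loop sees the loop variable's final value unless each one gets its own scope, which is exactly what the register helper provides. A tiny demonstration:

queues = {"presence_map": [1, 2], "edus": [1, 2, 3]}

# Buggy: all lambdas close over the same loop variable.
broken = [lambda: len(queue) for name, queue in queues.items()]
assert [f() for f in broken] == [3, 3]  # both see the last queue


# Fixed: a helper function gives each lambda its own binding of `queue`.
def register(name, queue):
    return lambda: len(queue)


fixed = [register(name, queue) for name, queue in queues.items()]
assert [f() for f in fixed] == [2, 3]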
@@ -115,13 +129,13 @@ class FederationRemoteSendQueue:

        self.clock.looping_call(self._clear_queue, 30 * 1000)

-    def _next_pos(self):
+    def _next_pos(self) -> int:
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

-    def _clear_queue(self):
+    def _clear_queue(self) -> None:
        """Clear the queues for anything older than N minutes"""

        FIVE_MINUTES_AGO = 5 * 60 * 1000
@@ -138,7 +152,7 @@ class FederationRemoteSendQueue:

        self._clear_queue_before_pos(position_to_delete)

-    def _clear_queue_before_pos(self, position_to_delete):
+    def _clear_queue_before_pos(self, position_to_delete: int) -> None:
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
@@ -188,13 +202,18 @@ class FederationRemoteSendQueue:
        for key in keys[:i]:
            del self.edus[key]

-    def notify_new_events(self, max_token):
+    def notify_new_events(self, max_token: RoomStreamToken) -> None:
        """As per FederationSender"""
-        # We don't need to replicate this as it gets sent down a different
-        # stream.
-        pass
+        # This should never get called.
+        raise NotImplementedError()

-    def build_and_send_edu(self, destination, edu_type, content, key=None):
+    def build_and_send_edu(
+        self,
+        destination: str,
+        edu_type: str,
+        content: JsonDict,
+        key: Optional[Hashable] = None,
+    ) -> None:
        """As per FederationSender"""
        if destination == self.server_name:
            logger.info("Not sending EDU to ourselves")
@@ -218,38 +237,39 @@ class FederationRemoteSendQueue:

        self.notifier.on_new_replication_data()

-    def send_read_receipt(self, receipt):
+    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """As per FederationSender

        Args:
-            receipt (synapse.types.ReadReceipt):
+            receipt:
        """
        # nothing to do here: the replication listener will handle it.
-        return defer.succeed(None)

-    def send_presence(self, states):
+    def send_presence(self, states: List[UserPresenceState]) -> None:
        """As per FederationSender

        Args:
-            states (list(UserPresenceState))
+            states
        """
        pos = self._next_pos()

        # We only want to send presence for our own users, so lets always just
        # filter here just in case.
-        local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states))
+        local_states = [s for s in states if self.is_mine_id(s.user_id)]

        self.presence_map.update({state.user_id: state for state in local_states})
        self.presence_changed[pos] = [state.user_id for state in local_states]

        self.notifier.on_new_replication_data()

-    def send_presence_to_destinations(self, states, destinations):
+    def send_presence_to_destinations(
+        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
+    ) -> None:
        """As per FederationSender

        Args:
-            states (list[UserPresenceState])
-            destinations (list[str])
+            states
+            destinations
        """
        for state in states:
            pos = self._next_pos()
@@ -258,15 +278,18 @@ class FederationRemoteSendQueue:

        self.notifier.on_new_replication_data()

-    def send_device_messages(self, destination):
+    def send_device_messages(self, destination: str) -> None:
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.

-    def get_current_token(self):
+    def wake_destination(self, server: str) -> None:
+        pass
+
+    def get_current_token(self) -> int:
        return self.pos - 1

-    def federation_ack(self, instance_name, token):
+    def federation_ack(self, instance_name: str, token: int) -> None:
        if self._sender_instances:
            # If we have configured multiple federation sender instances we need
            # to track their positions separately, and only clear the queue up
@@ -504,13 +527,16 @@ ParsedFederationStreamData = namedtuple(
)


-def process_rows_for_federation(transaction_queue, rows):
+def process_rows_for_federation(
+    transaction_queue: FederationSender,
+    rows: List[FederationStream.FederationStreamRow],
+) -> None:
    """Parse a list of rows from the federation stream and put them in the
    transaction queue ready for sending to the relevant homeservers.

    Args:
-        transaction_queue (FederationSender)
-        rows (list(synapse.replication.tcp.streams.federation.FederationStream.FederationStreamRow))
+        transaction_queue
+        rows
    """

    # The federation stream contains a bunch of different types of
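The # type: comments used throughout these hunks are the comment-style variable annotations understood by mypy; with inline annotations (Python 3.6+) the same declaration reads:

from typing import Dict

# Comment-style annotation, as used in the diff:
positions = {}  # type: Dict[str, int]

# Equivalent inline annotation (Python 3.6+):
positions2: Dict[str, int] = {}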
@@ -13,14 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+import abc
import logging
-from typing import Dict, Hashable, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple

from prometheus_client import Counter

-from twisted.internet import defer

import synapse
import synapse.metrics
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
@@ -40,9 +40,12 @@ from synapse.metrics import (
    events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import ReadReceipt, RoomStreamToken
+from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util.metrics import Measure, measure_func

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
@@ -65,8 +68,91 @@ CATCH_UP_STARTUP_DELAY_SEC = 15
CATCH_UP_STARTUP_INTERVAL_SEC = 5


-class FederationSender:
-    def __init__(self, hs: "synapse.server.HomeServer"):
+class AbstractFederationSender(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def notify_new_events(self, max_token: RoomStreamToken) -> None:
+        """This gets called when we have some new events we might want to
+        send out to other servers.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
+        """Send a RR to any other servers in the room
+
+        Args:
+            receipt: receipt to be sent
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def send_presence(self, states: List[UserPresenceState]) -> None:
+        """Send the new presence states to the appropriate destinations.
+
+        This actually queues up the presence states ready for sending and
+        triggers a background task to process them and send out the transactions.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def send_presence_to_destinations(
+        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
+    ) -> None:
+        """Send the given presence states to the given destinations.
+
+        Args:
+            destinations:
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def build_and_send_edu(
+        self,
+        destination: str,
+        edu_type: str,
+        content: JsonDict,
+        key: Optional[Hashable] = None,
+    ) -> None:
+        """Construct an Edu object, and queue it for sending
+
+        Args:
+            destination: name of server to send to
+            edu_type: type of EDU to send
+            content: content of EDU
+            key: clobbering key for this edu
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def send_device_messages(self, destination: str) -> None:
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def wake_destination(self, destination: str) -> None:
+        """Called when we want to retry sending transactions to a remote.
+
+        This is mainly useful if the remote server has been down and we think it
+        might have come back.
+        """
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def get_current_token(self) -> int:
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    def federation_ack(self, instance_name: str, token: int) -> None:
+        raise NotImplementedError()
+
+    @abc.abstractmethod
+    async def get_replication_rows(
+        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
+    ) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
+        raise NotImplementedError()
+
+
+class FederationSender(AbstractFederationSender):
+    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.server_name = hs.hostname
@@ -432,7 +518,7 @@ class FederationSender:
            queue.flush_read_receipts_for_room(room_id)

    @preserve_fn  # the caller should not yield on this
-    async def send_presence(self, states: List[UserPresenceState]):
+    async def send_presence(self, states: List[UserPresenceState]) -> None:
        """Send the new presence states to the appropriate destinations.

        This actually queues up the presence states ready for sending and
@@ -494,7 +580,7 @@ class FederationSender:
        self._get_per_destination_queue(destination).send_presence(states)

    @measure_func("txnqueue._process_presence")
-    async def _process_presence_inner(self, states: List[UserPresenceState]):
+    async def _process_presence_inner(self, states: List[UserPresenceState]) -> None:
        """Given a list of states populate self.pending_presence_by_dest and
        poke to send a new transaction to each destination
        """
@@ -516,9 +602,9 @@ class FederationSender:
        self,
        destination: str,
        edu_type: str,
-        content: dict,
+        content: JsonDict,
        key: Optional[Hashable] = None,
-    ):
+    ) -> None:
        """Construct an Edu object, and queue it for sending

        Args:
@@ -545,7 +631,7 @@ class FederationSender:

        self.send_edu(edu, key)

-    def send_edu(self, edu: Edu, key: Optional[Hashable]):
+    def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None:
        """Queue an EDU for sending

        Args:
@@ -563,7 +649,7 @@ class FederationSender:
        else:
            queue.send_edu(edu)

-    def send_device_messages(self, destination: str):
+    def send_device_messages(self, destination: str) -> None:
        if destination == self.server_name:
            logger.warning("Not sending device update to ourselves")
            return
@@ -575,7 +661,7 @@ class FederationSender:

        self._get_per_destination_queue(destination).attempt_new_transaction()

-    def wake_destination(self, destination: str):
+    def wake_destination(self, destination: str) -> None:
        """Called when we want to retry sending transactions to a remote.

        This is mainly useful if the remote server has been down and we think it
@@ -599,6 +685,10 @@ class FederationSender:
        # to a worker.
        return 0

+    def federation_ack(self, instance_name: str, token: int) -> None:
+        # It is not expected that this gets called on FederationSender.
+        raise NotImplementedError()
+
    @staticmethod
    async def get_replication_rows(
        instance_name: str, from_token: int, to_token: int, target_row_count: int
@@ -607,7 +697,7 @@ class FederationSender:
        # to a worker.
        return [], 0, False

-    async def _wake_destinations_needing_catchup(self):
+    async def _wake_destinations_needing_catchup(self) -> None:
        """
        Wakes up destinations that need catch-up and are not currently being
        backed off from.
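Extracting AbstractFederationSender means FederationRemoteSendQueue and FederationSender are now interchangeable to the type checker, and a subclass that forgets a method fails at instantiation time rather than at call time. A toy illustration of the abc mechanics:

import abc


class AbstractSender(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def send_device_messages(self, destination: str) -> None:
        raise NotImplementedError()


class GoodSender(AbstractSender):
    def send_device_messages(self, destination: str) -> None:
        print("sending to", destination)


class BadSender(AbstractSender):
    pass


GoodSender().send_device_messages("example.org")
try:
    BadSender()  # type: ignore[abstract]
except TypeError as e:
    print(e)  # can't instantiate abstract class BadSender ...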
Some files were not shown because too many files have changed in this diff.