Compare commits: `erikj/effi...v1.92.1` (188 commits)

Commit SHA1s:

1296e471c3 622463636c 4a5bf74372 efe778a0b8 7e98d382f9 fd50a9b47c 55c20da4a3 9de615b3aa
c9282baf03 1940d990a3 a2b8814d64 79aa26936f d77154be01 0425dd28f4 d35bed8369 dcb2778341
721346631e f84baecb6f 1cd0715a0f 748c38921c 4382d57640 8065eea6c7 e9eb26e3af dcd3698e1f
b85c3485b1 93f2fdd8d1 6525fd65ee ed5e8a77ca 3de82bb2af a2e0d4cd60 05d824526a 8c56e18e47
ebd8374fb5 62a1a9be52 e9235d92f2 9ec3da06da 001fc7bd19 63b51ef3fb 2d72367367 692ee2af19
40901af5e0 1bf143699c 501da8ecd8 224c2bbcfa 4379d3ef63 1511a55539 c0bbad8a96 743860e6a6
e54c1d4ed3 84f441f88f ed6de4b2d4 82699428e3 fcf7a5759e a8a46b1336 5c9402b9fd daf11e26ef
5856a8ba42 aeeca2a62e efdb87c898 5427cc20b9 e691243e19 0538e3e2db e3333bacff 851cbdcb57
33fa82a34c 23f88f9c59 020ff1afe3 7064b4bcf3 18279631e9 85118420a2 ec662bbe41 4adaba9acf
7cd79ce051 86ecd341ec 873971a8b9 da162cbe4e 19a1cda084 dffe095642 3b3fed7229 3f17178728
803f63df1c 0ba17777be 69048f7b48 8aa5479986 b657e89005 bc72d803d5 6d7c63fcc6 7dbac123f9
d6ae4041a4 358896e1b8 79c349dfb8 1e5a0e07a7 35d260d065 07c0875aa5 406ff3eb62 bd558a6dc3
2d15e39684 54317d34b7 6130afb862 0aba4a4eaa 54a51ff6c1 eb0dbab15b 0377cb4fab 8a4fb7a6ba
8c3bcea2da 4513b36a75 47c629bb27 ad3f43be9a 4347473946 29638220ab 837f28ce74 4ce32ade5a
6fc411c7bf e21ff0f048 b80ff1602e d834a80a12 9ff84bccbb 614efc488b 7f4b413690 efd4d06d76
dac97642e4 0328b56468 4581809846 3dfe5c0270 8e09b8aecb a476d5048b f3dc6dc19f 8af3f33d84
81a6f8c9ae 9d3713d6d5 b57630c507 340f08c6f7 8da3c2185b eca592b121 34b5db1fbc ec8499206e
4f6da0dba0 84ae2e3f6f d98a43d922 0a5f4f7665 f0a860908b 9c462f18a4 4f5bccbbba 01a45869f0
ca5d5de79b a51b0862a1 8fe1fd906a 90ad836ed8 5eb3fd785b 7cbb2a00d1 a4102d2a5f 190c990a76
b7695ac388 1fb5a7ad5d fa2c116bef e02f4b7de2 21407c6709 ae55cc1e6b 0c6142c4a1 fee0195b27
76b2218599 ea4ece3fcc 68b2611783 a719b703d9 a461f1f846 f9f3e89354 f98f4f2e16 58f8305114
96529c4236 6dc019d9dd 8d2a5586f7 76e392b0fa d4ea465496 8ebfd577e2 dbee081d14 99b7b801c3
641ff9ef7e 05f8dada8b 654902a758 4a711bf379 fc566cdf0a 3b6208b835 3b8348b06e 5c7364fea5
f08d05dd2c e1fa42249c fc1e534e41 835174180b
```diff
@@ -47,10 +47,9 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.9", "3.10", "3.11")
+        for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1")
     )


 trial_postgres_tests = [
     {
         "python-version": "3.8",
```
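The hunk above extends the comprehension that builds the SQLite test matrix. A minimal sketch of the pattern, with the `trial_sqlite_tests` name assumed (only the dict shape, the version tuple, and the neighbouring `trial_postgres_tests` appear in the hunk):

```python
# Sketch only: the variable name trial_sqlite_tests is an assumption; the
# hunk shows just the dict shape and the extended version tuple.
trial_sqlite_tests = [
    {
        "python-version": version,
        "database": "sqlite",
        "extras": "all",
    }
    for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1")
]

print(len(trial_sqlite_tests))  # one CI job per Python version -> 4
```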
.github/workflows/docker.yml (14 lines changed, vendored)
```diff
@@ -29,6 +29,16 @@ jobs:
       - name: Inspect builder
         run: docker buildx inspect
 
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Extract version from pyproject.toml
+        # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
+        # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
+        shell: bash
+        run: |
+          echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
+
       - name: Log in to DockerHub
         uses: docker/login-action@v2
         with:
@@ -61,7 +71,9 @@ jobs:
         uses: docker/build-push-action@v4
         with:
           push: true
-          labels: "gitsha1=${{ github.sha }}"
+          labels: |
+            gitsha1=${{ github.sha }}
+            org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
```
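The new `Extract version from pyproject.toml` step feeds `SYNAPSE_VERSION` into `$GITHUB_ENV` with a grep/sed pipeline. A rough Python equivalent of that extraction, as an illustration only (assumes Python 3.11+ for the stdlib `tomllib`, and the Poetry-style `[tool.poetry]` layout of Synapse's `pyproject.toml`):

```python
# Illustration only: read the same version string the workflow's grep/sed
# pipeline pulls out of pyproject.toml.
import tomllib  # stdlib since Python 3.11

with open("pyproject.toml", "rb") as f:
    pyproject = tomllib.load(f)

# Assumes the Poetry layout: [tool.poetry] version = "..."
version = pyproject["tool"]["poetry"]["version"]
print(f"SYNAPSE_VERSION={version}")
```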
.github/workflows/latest_deps.yml (4 lines changed, vendored)
```diff
@@ -57,8 +57,8 @@ jobs:
       # `pip install matrix-synapse[all]` as closely as possible.
       - run: poetry update --no-dev
       - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
-      - name: Remove warn_unused_ignores from mypy config
-        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
+      - name: Remove unhelpful options from mypy config
+        run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
       - run: poetry run mypy
   trial:
     needs: check_repo
```
.github/workflows/twisted_trunk.yml (9 lines changed, vendored)
```diff
@@ -5,6 +5,9 @@ on:
     - cron: 0 8 * * *
 
   workflow_dispatch:
+    # NB: inputs are only present when this workflow is dispatched manually.
+    # (The default below is the default field value in the form to trigger
+    # a manual dispatch). Otherwise the inputs will evaluate to null.
     inputs:
       twisted_ref:
         description: Commit, branch or tag to checkout from upstream Twisted.
@@ -49,10 +52,10 @@ jobs:
           extras: "all"
       - run: |
           poetry remove twisted
-          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
+          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
           poetry install --no-interaction --extras "all test"
-      - name: Remove warn_unused_ignores from mypy config
-        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
+      - name: Remove unhelpful options from mypy config
+        run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
       - run: poetry run mypy
 
   trial:
```
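The `${{ inputs.twisted_ref || 'trunk' }}` expression defaults the ref to `trunk` on cron-scheduled runs, where `inputs` evaluates to null. The same defaulting, sketched in Python (the function name and requirement string are illustrative only):

```python
# Sketch of the fallback performed by the workflow expression: dispatch
# inputs are absent on scheduled runs, so fall back to "trunk".
from typing import Optional


def twisted_requirement(twisted_ref: Optional[str]) -> str:
    ref = twisted_ref or "trunk"
    return f"git+https://github.com/twisted/twisted.git#{ref}"


assert twisted_requirement(None).endswith("#trunk")
assert twisted_requirement("tags/23.8.0").endswith("#tags/23.8.0")
```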
CHANGES.md (355 lines changed)
@@ -1,3 +1,358 @@

# Synapse 1.92.1 (2023-09-12)

Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead.


# Synapse 1.92.0 (2023-09-12)

### Bugfixes

- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))

### Internal Changes

- Fix incorrect docstring for `Ratelimiter`. ([\#16255](https://github.com/matrix-org/synapse/issues/16255))
- Update the release script to work on macOS. ([\#16266](https://github.com/matrix-org/synapse/issues/16266))


# Synapse 1.92.0rc1 (2023-09-05)

### Features

- Add configuration setting for CAS protocol version. Contributed by Aurélien Grimpard. ([\#15816](https://github.com/matrix-org/synapse/issues/15816))
- Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). ([\#16113](https://github.com/matrix-org/synapse/issues/16113))
- Experimental support for [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041): return a `Retry-After` header with `M_LIMIT_EXCEEDED` error responses. ([\#16136](https://github.com/matrix-org/synapse/issues/16136))
- Add `last_seen_ts` to the [admin users API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#16218](https://github.com/matrix-org/synapse/issues/16218))
- Improve resource usage when sending data to a large number of remote hosts that are marked as "down". ([\#16223](https://github.com/matrix-org/synapse/issues/16223))

### Bugfixes

- Fix IPv6-related bugs on SMTP settings, adding groundwork to fix similar issues. Contributed by @evilham and @telmich (ungleich.ch). ([\#16155](https://github.com/matrix-org/synapse/issues/16155))
- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. ([\#16185](https://github.com/matrix-org/synapse/issues/16185))
- Fix an inaccurate error message shown while attempting to ban or unban a user with the same or higher power level, by splitting the conditional statements. Contributed by @leviosacz. ([\#16205](https://github.com/matrix-org/synapse/issues/16205))
- Fix a rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. ([\#16210](https://github.com/matrix-org/synapse/issues/16210))
- Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. ([\#16211](https://github.com/matrix-org/synapse/issues/16211))
- Fix a long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. ([\#16221](https://github.com/matrix-org/synapse/issues/16221))

### Improved Documentation

- Update links to the [matrix.org blog](https://matrix.org/blog/). ([\#16008](https://github.com/matrix-org/synapse/issues/16008))
- Document which [admin APIs](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. ([\#16168](https://github.com/matrix-org/synapse/issues/16168))
- Document the [`exclude_rooms_from_sync`](https://matrix-org.github.io/synapse/v1.92/usage/configuration/config_documentation.html#exclude_rooms_from_sync) configuration option. ([\#16178](https://github.com/matrix-org/synapse/issues/16178))

### Internal Changes

- Prepare unit tests for Python 3.12. ([\#16099](https://github.com/matrix-org/synapse/issues/16099))
- Fix nightly CI jobs. ([\#16121](https://github.com/matrix-org/synapse/issues/16121), [\#16213](https://github.com/matrix-org/synapse/issues/16213))
- Describe which rate limiter was hit in logs. ([\#16135](https://github.com/matrix-org/synapse/issues/16135))
- Simplify presence code when using workers. ([\#16170](https://github.com/matrix-org/synapse/issues/16170))
- Track per-device information in the presence code. ([\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172))
- Stop using the `event_txn_id` table. ([\#16175](https://github.com/matrix-org/synapse/issues/16175))
- Use `AsyncMock` instead of custom code. ([\#16179](https://github.com/matrix-org/synapse/issues/16179), [\#16180](https://github.com/matrix-org/synapse/issues/16180))
- Improve error reporting of invalid data passed to `/_matrix/key/v2/query`. ([\#16183](https://github.com/matrix-org/synapse/issues/16183))
- Task scheduler: add replication notify for new task to launch ASAP. ([\#16184](https://github.com/matrix-org/synapse/issues/16184))
- Improve type hints. ([\#16186](https://github.com/matrix-org/synapse/issues/16186), [\#16188](https://github.com/matrix-org/synapse/issues/16188), [\#16201](https://github.com/matrix-org/synapse/issues/16201))
- Bump black version to 23.7.0. ([\#16187](https://github.com/matrix-org/synapse/issues/16187))
- Log the details of background update failures. ([\#16212](https://github.com/matrix-org/synapse/issues/16212))
- Cache device resync requests over replication. ([\#16241](https://github.com/matrix-org/synapse/issues/16241))

### Updates to locked dependencies

* Bump anyhow from 1.0.72 to 1.0.75. ([\#16141](https://github.com/matrix-org/synapse/issues/16141))
* Bump furo from 2023.7.26 to 2023.8.19. ([\#16238](https://github.com/matrix-org/synapse/issues/16238))
* Bump phonenumbers from 8.13.18 to 8.13.19. ([\#16237](https://github.com/matrix-org/synapse/issues/16237))
* Bump psycopg2 from 2.9.6 to 2.9.7. ([\#16196](https://github.com/matrix-org/synapse/issues/16196))
* Bump regex from 1.9.3 to 1.9.4. ([\#16195](https://github.com/matrix-org/synapse/issues/16195))
* Bump ruff from 0.0.277 to 0.0.286. ([\#16198](https://github.com/matrix-org/synapse/issues/16198))
* Bump sentry-sdk from 1.29.2 to 1.30.0. ([\#16236](https://github.com/matrix-org/synapse/issues/16236))
* Bump serde from 1.0.184 to 1.0.188. ([\#16194](https://github.com/matrix-org/synapse/issues/16194))
* Bump serde_json from 1.0.104 to 1.0.105. ([\#16140](https://github.com/matrix-org/synapse/issues/16140))
* Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. ([\#16200](https://github.com/matrix-org/synapse/issues/16200))
* Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199))


# Synapse 1.91.2 (2023-09-06)

### Bugfixes

- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))


# Synapse 1.91.1 (2023-09-04)

### Bugfixes

- Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause an excessive linear growth in CPU usage. ([\#16220](https://github.com/matrix-org/synapse/issues/16220))


# Synapse 1.91.0 (2023-08-30)

No significant changes since 1.91.0rc1.


# Synapse 1.91.0rc1 (2023-08-23)

### Features

- Implement an admin API to lock a user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870))
- Implement a task scheduler for resumable, potentially long-running tasks. ([\#15891](https://github.com/matrix-org/synapse/issues/15891))
- Allow specifying `client_secret_path` as an alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030))
- Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC providers). ([\#16094](https://github.com/matrix-org/synapse/issues/16094))
- Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. ([\#16114](https://github.com/matrix-org/synapse/issues/16114))

### Bugfixes

- Fix a long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). ([\#16052](https://github.com/matrix-org/synapse/issues/16052))
- Fix a long-standing bug in `/sync` where `timeout=0` did not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. ([\#16080](https://github.com/matrix-org/synapse/issues/16080))
- Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. ([\#16116](https://github.com/matrix-org/synapse/issues/16116))
- Filter out user agent references to the sliding sync proxy and rust-sdk from the `user_daily_visits` table to ensure that Element X can be represented fully. ([\#16124](https://github.com/matrix-org/synapse/issues/16124))
- User consent and third-party ID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. ([\#16127](https://github.com/matrix-org/synapse/issues/16127), [\#16134](https://github.com/matrix-org/synapse/issues/16134))
- Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. ([\#16133](https://github.com/matrix-org/synapse/issues/16133), [\#16169](https://github.com/matrix-org/synapse/issues/16169))
- Fix performance degradation when there are a lot of in-flight replication requests. ([\#16148](https://github.com/matrix-org/synapse/issues/16148))
- Fix a bug introduced in 1.87 where Synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. ([\#16156](https://github.com/matrix-org/synapse/issues/16156), [\#16164](https://github.com/matrix-org/synapse/issues/16164))

### Improved Documentation

- Structured logging docs: add a link to explain the ELK stack. ([\#16091](https://github.com/matrix-org/synapse/issues/16091))

### Internal Changes

- Update dehydrated devices implementation. ([\#16010](https://github.com/matrix-org/synapse/issues/16010))
- Fix database performance of read/write worker locks. ([\#16061](https://github.com/matrix-org/synapse/issues/16061))
- Fix building the nix development environment on macOS systems. ([\#16063](https://github.com/matrix-org/synapse/issues/16063))
- Override global statement timeout when creating indexes in Postgres. ([\#16085](https://github.com/matrix-org/synapse/issues/16085))
- Fix the type annotation on `run_db_interaction` in the Module API. ([\#16089](https://github.com/matrix-org/synapse/issues/16089))
- Clean up the presence code. ([\#16092](https://github.com/matrix-org/synapse/issues/16092))
- Run `pyupgrade` for Python 3.8+. ([\#16110](https://github.com/matrix-org/synapse/issues/16110))
- Rename pagination and purge locks and add comments to explain why they exist and how they work. ([\#16112](https://github.com/matrix-org/synapse/issues/16112))
- Attempt to fix the twisted trunk job. ([\#16115](https://github.com/matrix-org/synapse/issues/16115))
- Cache token introspection response from OIDC provider. ([\#16117](https://github.com/matrix-org/synapse/issues/16117))
- Add cache to `get_server_keys_json_for_remote`. ([\#16123](https://github.com/matrix-org/synapse/issues/16123))
- Add an admin endpoint to allow authorizing server to signal token revocations. ([\#16125](https://github.com/matrix-org/synapse/issues/16125))
- Add response time metrics for introspection requests for delegated auth. ([\#16131](https://github.com/matrix-org/synapse/issues/16131))
- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow impersonation by an admin user using the `_oidc_admin_impersonate_user_id` query parameter. ([\#16132](https://github.com/matrix-org/synapse/issues/16132))
- Increase performance of read/write locks. ([\#16149](https://github.com/matrix-org/synapse/issues/16149))
- Improve presence tests. ([\#16150](https://github.com/matrix-org/synapse/issues/16150), [\#16151](https://github.com/matrix-org/synapse/issues/16151), [\#16158](https://github.com/matrix-org/synapse/issues/16158))
- Raise the poetry-core version cap to 1.7.0. ([\#16152](https://github.com/matrix-org/synapse/issues/16152))
- Fix assertion in user directory unit tests. ([\#16157](https://github.com/matrix-org/synapse/issues/16157))
- Reduce scope of locks when paginating to alleviate DB contention. ([\#16159](https://github.com/matrix-org/synapse/issues/16159))
- Reduce DB contention on worker locks. ([\#16160](https://github.com/matrix-org/synapse/issues/16160))
- Task scheduler: mark task as active if we are scheduling it as soon as possible. ([\#16165](https://github.com/matrix-org/synapse/issues/16165))

### Updates to locked dependencies

* Bump click from 8.1.6 to 8.1.7. ([\#16145](https://github.com/matrix-org/synapse/issues/16145))
* Bump gitpython from 3.1.31 to 3.1.32. ([\#16103](https://github.com/matrix-org/synapse/issues/16103))
* Bump ijson from 3.2.1 to 3.2.3. ([\#16143](https://github.com/matrix-org/synapse/issues/16143))
* Bump isort from 5.11.5 to 5.12.0. ([\#16108](https://github.com/matrix-org/synapse/issues/16108))
* Bump log from 0.4.19 to 0.4.20. ([\#16109](https://github.com/matrix-org/synapse/issues/16109))
* Bump pygithub from 1.59.0 to 1.59.1. ([\#16144](https://github.com/matrix-org/synapse/issues/16144))
* Bump sentry-sdk from 1.28.1 to 1.29.2. ([\#16142](https://github.com/matrix-org/synapse/issues/16142))
* Bump serde from 1.0.183 to 1.0.184. ([\#16139](https://github.com/matrix-org/synapse/issues/16139))
* Bump txredisapi from 1.4.9 to 1.4.10. ([\#16107](https://github.com/matrix-org/synapse/issues/16107))
* Bump types-bleach from 6.0.0.3 to 6.0.0.4. ([\#16106](https://github.com/matrix-org/synapse/issues/16106))
* Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105))
* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146))

# Synapse 1.90.0 (2023-08-15)

No significant changes since 1.90.0rc1.


# Synapse 1.90.0rc1 (2023-08-08)

### Features

- Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). ([\#15629](https://github.com/matrix-org/synapse/issues/15629))
- Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). ([\#15868](https://github.com/matrix-org/synapse/issues/15868))

### Bugfixes

- Fix a long-standing bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791))
- Fix a long-standing bug where the profile endpoint returned a 404 when the user's display name was empty. ([\#16012](https://github.com/matrix-org/synapse/issues/16012))
- Fix a long-standing bug where `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043))
- Fix a long-standing bug with deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046))

### Updates to the Docker image

- Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. ([\#15972](https://github.com/matrix-org/synapse/issues/15972), [\#16009](https://github.com/matrix-org/synapse/issues/16009))

### Improved Documentation

- Add an internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). ([\#16015](https://github.com/matrix-org/synapse/issues/16015))
- Clarify comment on the keys/upload over replication endpoint. ([\#16016](https://github.com/matrix-org/synapse/issues/16016))
- Do not expose the Admin API in the caddy reverse proxy example. Contributed by @NilsIrl. ([\#16027](https://github.com/matrix-org/synapse/issues/16027))

### Deprecations and Removals

- Remove support for legacy application service paths. ([\#15964](https://github.com/matrix-org/synapse/issues/15964))
- Move support for application service query parameter authorization behind a configuration option. ([\#16017](https://github.com/matrix-org/synapse/issues/16017))

### Internal Changes

- Update SQL queries to inline boolean parameters as supported in SQLite 3.27. ([\#15525](https://github.com/matrix-org/synapse/issues/15525))
- Allow for the configuration of the backoff algorithm for federation destinations. ([\#15754](https://github.com/matrix-org/synapse/issues/15754))
- Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991))
- Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992))
- Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993))
- Properly overwrite the `redacts` content property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013))
- Fix building the nix development environment on macOS systems. ([\#16019](https://github.com/matrix-org/synapse/issues/16019))
- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031))
- Combine duplicated code. ([\#16023](https://github.com/matrix-org/synapse/issues/16023))
- Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028))
- Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068))
- Drop backwards-compat hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069))

### Updates to locked dependencies

* Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011))
* Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048))
* Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077))
* Bump immutabledict from 2.2.4 to 3.0.0. ([\#16034](https://github.com/matrix-org/synapse/issues/16034))
* Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044))
* Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081))
* Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076))
* Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073))
* Bump serde from 1.0.171 to 1.0.175. ([\#15982](https://github.com/matrix-org/synapse/issues/15982))
* Bump serde from 1.0.175 to 1.0.179. ([\#16033](https://github.com/matrix-org/synapse/issues/16033))
* Bump serde from 1.0.179 to 1.0.183. ([\#16074](https://github.com/matrix-org/synapse/issues/16074))
* Bump serde_json from 1.0.103 to 1.0.104. ([\#16032](https://github.com/matrix-org/synapse/issues/16032))
* Bump service-identity from 21.1.0 to 23.1.0. ([\#16038](https://github.com/matrix-org/synapse/issues/16038))
* Bump types-commonmark from 0.9.2.3 to 0.9.2.4. ([\#16037](https://github.com/matrix-org/synapse/issues/16037))
* Bump types-jsonschema from 4.17.0.8 to 4.17.0.10. ([\#16036](https://github.com/matrix-org/synapse/issues/16036))
* Bump types-netaddr from 0.8.0.8 to 0.8.0.9. ([\#16035](https://github.com/matrix-org/synapse/issues/16035))
* Bump types-opentracing from 2.4.10.5 to 2.4.10.6. ([\#16078](https://github.com/matrix-org/synapse/issues/16078))
* Bump types-setuptools from 68.0.0.0 to 68.0.0.3. ([\#16079](https://github.com/matrix-org/synapse/issues/16079))


# Synapse 1.89.0 (2023-08-01)

No significant changes since 1.89.0rc1.


# Synapse 1.89.0rc1 (2023-07-25)

### Features

- Add Unix Socket support for HTTP Replication Listeners. [Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924))
- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). ([\#15911](https://github.com/matrix-org/synapse/issues/15911))
- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912))
- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969))
- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions, and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))

### Bugfixes

- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887))
- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925))
- Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957))
- Ensure a long state resolution does not starve the CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960))
- Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973))
- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))

### Improved Documentation

- Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921))
- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938))
- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981))

### Deprecations and Removals

- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928))

### Internal Changes

- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884))
- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909))
- Add details to the warning logged when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922))
- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926))
- Fix a bug with the read/write lock implementation. This is currently unused so has no observable effects. ([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958))
- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940))
- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952))
- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955))
- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961))
- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968))
- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971))

### Updates to locked dependencies

* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949))
* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984))
* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943))
* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948))
* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986))
* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945))
* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946))
* Bump pygithub from 1.58.2 to 1.59.0. ([\#15834](https://github.com/matrix-org/synapse/issues/15834))
* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951))
* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985))
* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950))
* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932))
* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983))
* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947))


# Synapse 1.88.0 (2023-07-18)

This release
Cargo.lock (38 lines changed, generated)
```diff
@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.72"
+version = "1.0.75"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
+checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
 
 [[package]]
 name = "arc-swap"
@@ -132,9 +132,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.19"
+version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
 name = "memchr"
@@ -291,9 +291,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.9.1"
+version = "1.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
+checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -303,9 +303,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.3.2"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
+checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -314,9 +314,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.7.3"
+version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"
+checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
 
 [[package]]
 name = "ryu"
@@ -332,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.171"
+version = "1.0.188"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
+checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.171"
+version = "1.0.188"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
+checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.25",
+ "syn 2.0.28",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.103"
+version = "1.0.105"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b"
+checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360"
 dependencies = [
  "itoa",
  "ryu",
@@ -386,9 +386,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.25"
+version = "2.0.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
+checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
 dependencies = [
  "proc-macro2",
  "quote",
```
Deleted changelog.d entries (one file each; file names were not captured):

```diff
@@ -1 +0,0 @@
-Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.
@@ -1 +0,0 @@
-Fix long-standing bug where remote invites weren't correctly pushed.
@@ -1 +0,0 @@
-Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it.
@@ -1 +0,0 @@
-Fix background schema updates failing over a large upgrade gap.
@@ -1 +0,0 @@
-Document which Python version runs on a given Linux distribution so we can more easily clean up later.
@@ -1 +0,0 @@
-Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009).
@@ -1 +0,0 @@
-Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820).
@@ -1 +0,0 @@
-Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`.
@@ -1 +0,0 @@
-Better clarify how to run a worker instance (pass both configs).
@@ -1 +0,0 @@
-Add details to warning in log when we fail to fetch an alias.
@@ -1 +0,0 @@
-Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.
@@ -1 +0,0 @@
-Fix a bug introduced in 1.86.0 where Synapse starting with an empty `experimental_features` configuration setting.
@@ -1 +0,0 @@
-Remove unneeded `__init__`.
@@ -1 +0,0 @@
-Remove support for calling the `/register` endpoint with an unspecced `user` property for application services.
@@ -1 +0,0 @@
-Fix bug with read/write lock implementation. This is currently unused so has no observable effects.
@@ -1 +0,0 @@
-Improve the documentation for the login as a user admin API.
@@ -1 +0,0 @@
-Unbreak the nix development environment by pinning the Rust version to 1.70.0.
@@ -1 +0,0 @@
-Update presence metrics to differentiate remote vs local users.
@@ -1 +0,0 @@
-Fix bug with read/write lock implementation. This is currently unused so has no observable effects.
@@ -1 +0,0 @@
-Ensure a long state res does not starve CPU by occasionally yielding to the reactor.
@@ -1 +0,0 @@
-Reduce the amount of state we pull out.
@@ -1 +0,0 @@
-Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`.
```
```diff
@@ -769,7 +769,7 @@ def main(server_url, identity_server_url, username, token, config_path):
     global CONFIG_JSON
     CONFIG_JSON = config_path  # bit cheeky, but just overwrite the global
     try:
-        with open(config_path, "r") as config:
+        with open(config_path) as config:
             syn_cmd.config = json.load(config)
             try:
                 http_client.verbose = "on" == syn_cmd.config["verbose"]
```
```diff
@@ -63,7 +63,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "enable": true,
-      "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
+      "expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
       "iconColor": "purple",
       "name": "deploys",
       "titleFormat": "Deployed {{version}}"
```
debian/changelog (66 lines changed, vendored)
```diff
@@ -1,3 +1,69 @@
+matrix-synapse-py3 (1.92.1) stable; urgency=medium
+
+  * New Synapse release 1.92.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Sep 2023 13:19:42 +0200
+
+matrix-synapse-py3 (1.92.0) stable; urgency=medium
+
+  * New Synapse release 1.92.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Sep 2023 11:59:23 +0200
+
+matrix-synapse-py3 (1.91.2) stable; urgency=medium
+
+  * New synapse release 1.91.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 06 Sep 2023 14:59:30 +0000
+
+matrix-synapse-py3 (1.92.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.92.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Sep 2023 11:21:43 +0100
+
+matrix-synapse-py3 (1.91.1) stable; urgency=medium
+
+  * New Synapse release 1.91.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 04 Sep 2023 14:03:18 +0100
+
+matrix-synapse-py3 (1.91.0) stable; urgency=medium
+
+  * New Synapse release 1.91.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 30 Aug 2023 11:18:10 +0100
+
+matrix-synapse-py3 (1.91.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.91.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 23 Aug 2023 09:47:18 -0700
+
+matrix-synapse-py3 (1.90.0) stable; urgency=medium
+
+  * New Synapse release 1.90.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Aug 2023 11:17:34 +0100
+
+matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.90.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Aug 2023 15:29:34 +0100
+
+matrix-synapse-py3 (1.89.0) stable; urgency=medium
+
+  * New Synapse release 1.89.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Aug 2023 11:07:15 +0100
+
+matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.89.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Jul 2023 14:31:07 +0200
+
 matrix-synapse-py3 (1.88.0) stable; urgency=medium
 
   * New Synapse release 1.88.0.
 
```
```diff
@@ -861,7 +861,7 @@ def generate_worker_files(
         # Then a worker config file
         convert(
             "/conf/worker.yaml.j2",
-            "/conf/workers/{name}.yaml".format(name=worker_name),
+            f"/conf/workers/{worker_name}.yaml",
             **worker_config,
             worker_log_config_filepath=log_config_filepath,
             using_unix_sockets=using_unix_sockets,
```
```diff
@@ -82,7 +82,7 @@ def generate_config_from_template(
             with open(filename) as handle:
                 value = handle.read()
         else:
-            log("Generating a random secret for {}".format(secret))
+            log(f"Generating a random secret for {secret}")
             value = codecs.encode(os.urandom(32), "hex").decode()
             with open(filename, "w") as handle:
                 handle.write(value)
```
```diff
@@ -97,6 +97,7 @@
     - [Cancellation](development/synapse_architecture/cancellation.md)
   - [Log Contexts](log_contexts.md)
   - [Replication](replication.md)
+    - [Streams](development/synapse_architecture/streams.md)
   - [TCP Replication](tcp_replication.md)
 - [Faster remote joins](development/synapse_architecture/faster_joins.md)
 - [Internal Documentation](development/internal_documentation/README.md)
```
```diff
@@ -1,5 +1,7 @@
 # Account validity API
 
+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 This API allows a server administrator to manage the validity of an account. To
 use it, you must enable the account validity feature (under
 `account_validity`) in Synapse's configuration.
```
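As a hedged illustration of the API this note is being added to (the request shape is described further down the page, outside this diff; host, token, and values below are placeholders):

```python
# Sketch (not part of the diff): extend a user's account validity via the
# admin API; endpoint and body follow the account validity docs.
import requests

resp = requests.post(
    "https://example.com/_synapse/admin/v1/account_validity/validity",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={
        "user_id": "@alice:example.com",
        "expiration_ts": 1699999999000,  # new expiry, in milliseconds
        "enable_renewal_emails": True,
    },
)
resp.raise_for_status()
print(resp.json())
```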
```diff
@@ -1,5 +1,7 @@
 # Shared-Secret Registration
 
+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 This API allows for the creation of users in an administrative and
 non-interactive way. This is generally used for bootstrapping a Synapse
 instance with administrator accounts.
```
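For context, a sketch of the shared-secret flow this page documents: fetch a nonce, HMAC-SHA1 it (keyed with `registration_shared_secret`) together with the username, password, and admin flag, then POST the result. Host, secret, and credentials below are placeholders:

```python
# Sketch of the documented nonce + HMAC-SHA1 registration flow.
import hashlib
import hmac

import requests

base = "https://example.com/_synapse/admin/v1/register"
shared_secret = b"<registration_shared_secret>"

nonce = requests.get(base).json()["nonce"]

mac = hmac.new(shared_secret, digestmod=hashlib.sha1)
mac.update(nonce.encode("utf8"))
mac.update(b"\x00alice")       # \0-separated username
mac.update(b"\x00wonderland")  # \0-separated password
mac.update(b"\x00admin")       # b"\x00notadmin" for a regular user

resp = requests.post(
    base,
    json={
        "nonce": nonce,
        "username": "alice",
        "password": "wonderland",
        "admin": True,
        "mac": mac.hexdigest(),
    },
)
print(resp.json())
```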
@@ -146,6 +146,7 @@ Body parameters:
|
||||
- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
|
||||
granting them access to the Admin API, among other things.
|
||||
- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
|
||||
- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
|
||||
|
||||
Note: the `password` field must also be set if both of the following are true:
|
||||
- `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
|
||||
@@ -217,7 +218,9 @@ The following parameters should be set in the URL:
 - `name` - Is optional and filters to only return users with user ID localparts
   **or** displaynames that contain this value.
 - `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users.
-  Defaults to `true` to include guest users.
+  Defaults to `true` to include guest users. This parameter is not supported when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+- `admins` - Optional flag to filter admins. If `true`, only admins are queried. If `false`, admins are excluded from
+  the query. When the flag is absent (the default), **both** admins and non-admins are included in the search results.
 - `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users.
   Defaults to `false` to exclude deactivated users.
 - `limit` - string representing a positive integer - Is optional but is used for pagination,
@@ -239,6 +242,7 @@ The following parameters should be set in the URL:
   - `displayname` - Users are ordered alphabetically by `displayname`.
   - `avatar_url` - Users are ordered alphabetically by avatar URL.
   - `creation_ts` - Users are ordered by when the user was created in ms.
+  - `last_seen_ts` - Users are ordered by when the user was last seen in ms.

 - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
   Setting this value to `b` will reverse the above sort order. Defaults to `f`.
@@ -269,6 +273,7 @@ The following fields are returned in the JSON response body:
   - `displayname` - string - The user's display name if they have set one.
   - `avatar_url` - string - The user's avatar URL if they have set one.
   - `creation_ts` - integer - The user's creation timestamp in ms.
+  - `last_seen_ts` - integer - The user's last activity timestamp in ms.

 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of media.
@@ -387,6 +392,8 @@ The following actions are **NOT** performed. The list may be incomplete.

 ## Reset password

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 Changes the password of another user. This will automatically log the user out of all their devices.

 The api is:
@@ -410,6 +417,8 @@ The parameter `logout_devices` is optional and defaults to `true`.

 ## Get whether a user is a server administrator or not

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 The api is:

 ```
@@ -427,6 +436,8 @@ A response body like the following is returned:

 ## Change whether a user is a server administrator or not

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 Note that you cannot demote yourself.

 The api is:
@@ -720,6 +731,8 @@ delete largest/smallest or newest/oldest files first.

 ## Login as a user

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 Get an access token that can be used to authenticate as that user. Useful for
 when admins wish to do actions on behalf of a user.
@@ -12,7 +12,7 @@ Note that this schedule might be modified depending on the availability of the
 Synapse team, e.g. releases may be skipped to avoid holidays.

 Release announcements can be found in the
-[release category of the Matrix blog](https://matrix.org/blog/category/releases).
+[release category of the Matrix blog](https://matrix.org/category/releases).

 ## Bugfix releases

@@ -34,4 +34,4 @@ be held to be released together.

 In some cases, a pre-disclosure of a security release will be issued as a notice
 to Synapse operators that there is an upcoming security release. These can be
-found in the [security category of the Matrix blog](https://matrix.org/blog/category/security).
+found in the [security category of the Matrix blog](https://matrix.org/category/security).
docs/development/synapse_architecture/streams.md (new file, 157 lines)
@@ -0,0 +1,157 @@
## Streams

Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
    https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py
).
Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
For example:

- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).

See [`synapse.replication.tcp.streams`](
    https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py
) for the full list of streams.

It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes—especially if those changes are made by different workers.
To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](
    https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
).

### Definition

A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.

Each fact has an ID, called its "stream ID".
Readers should only process facts in ascending stream ID order.

Roughly speaking, each stream is backed by a database table.
It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
Typically, a fact is expressed with a single row in its backing table.[^2]
Within a stream, no two facts may have the same stream ID.

> _Aside_. Some additional notes on streams' backing tables.
>
> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
> 2. The backing tables may have other uses.
>    For example, the events table backs the events stream, and is read when processing new events.
>    But old rows are read from the table all the time, whenever Synapse needs to look up some facts about an event.
> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.

Stream writers can "reserve" a stream ID, and then later mark it as having been completed.
Stream writers need to track the completion of each stream fact.
In the happy case, completion means a fact has been written to the stream table.
But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
Once completed, the rows written with that stream ID are fixed, and no new rows
will be inserted with that ID.

### Current stream ID

For any given stream reader (including writers themselves), we may define a per-writer current stream ID:

> The current stream ID _for a writer W_ is the largest stream ID such that
> all transactions added by W with equal or smaller ID have completed.

Similarly, there is a "linear" notion of current stream ID:

> The "linear" current stream ID is the largest stream ID such that
> all facts (added by any writer) with equal or smaller ID have completed.

Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.

**NB.** In both senses of "current", if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.

For single-writer streams, the per-writer current ID and the linear current ID are the same.
Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.

_Example_.
Consider a single-writer stream which is initially at ID 1.

| Action     | Current stream ID | Notes                                           |
|------------|-------------------|-------------------------------------------------|
|            | 1                 |                                                 |
| Reserve 2  | 1                 |                                                 |
| Reserve 3  | 1                 |                                                 |
| Complete 3 | 1                 | current ID unchanged, waiting for 2 to complete |
| Complete 2 | 3                 | current ID jumps from 1 -> 3                    |
| Reserve 4  | 3                 |                                                 |
| Reserve 5  | 3                 |                                                 |
| Reserve 6  | 3                 |                                                 |
| Complete 5 | 3                 |                                                 |
| Complete 4 | 5                 | current ID jumps 3->5, even though 6 is pending |
| Complete 6 | 6                 |                                                 |
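The bookkeeping above can be made concrete with a short sketch. The following Python is purely illustrative — it is not Synapse's actual `StreamIdGenerator`, just the invariant it maintains — and reproduces the behaviour shown in the table:

```python
# Illustrative sketch only: tracks a single writer's "current stream ID".
class StreamIdTracker:
    def __init__(self, current: int) -> None:
        self._current = current          # every ID <= this has completed
        self._pending: set[int] = set()  # reserved, but not yet completed
        self._next_id = current + 1

    def reserve(self) -> int:
        stream_id = self._next_id
        self._next_id += 1
        self._pending.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        self._pending.discard(stream_id)
        # Advance over the contiguous run of completed IDs, stopping at the
        # first ID that is still pending.
        while self._current + 1 < self._next_id and self._current + 1 not in self._pending:
            self._current += 1

    @property
    def current(self) -> int:
        return self._current

tracker = StreamIdTracker(current=1)
two, three = tracker.reserve(), tracker.reserve()
tracker.complete(three)
assert tracker.current == 1  # still waiting for 2 to complete
tracker.complete(two)
assert tracker.current == 3  # jumps straight from 1 to 3
```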
### Multi-writer streams

There are two ways to view a multi-writer stream.

1. Treat it as a collection of distinct single-writer streams, one
   for each writer.
2. Treat it as a single stream.

The single stream (option 2) is conceptually simpler, and easier to represent (a single stream ID).
However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer.
In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (a map from writer to stream ID).
The payoff for doing so is that readers can "peek" ahead to facts that completed on one writer no matter the state of the others, reducing latency.

Note that a multi-writer stream can be viewed in both ways.
For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
But the background process that works through events treats them as a single linear stream.

Another useful example is the cache invalidation stream.
The facts this stream holds are instructions to "you should now invalidate these cache entries".
We only ever treat this as multiple single-writer streams, as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts; and the invalidations commute/are idempotent.)

### Writing to streams

Writers need to track:
- their current position (i.e. their own per-writer stream ID); and
- their facts currently awaiting completion.

At startup,
- the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
- there are no facts awaiting completion.

To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.

To write a fact to the stream: insert the appropriate rows to the appropriate backing table.

To complete a fact, first remove it from your map of facts currently awaiting completion.
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
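A hedged sketch of that write path follows. `reserve_stream_id` (a `nextval` call on the sequence), `insert_fact_row` and `emit_rdata` are hypothetical stand-ins for the database and replication layers — not Synapse's real function names — and the advance rule is the same as in the earlier sketch:

```python
# Hypothetical sketch of a stream writer; the helper names are illustrative.
class StreamWriter:
    def __init__(self, current_position: int) -> None:
        self.current_position = current_position    # recovered from the database at startup
        self.awaiting_completion: set[int] = set()  # empty at startup
        self.highest_reserved = current_position

    def write_fact(self, fact: dict) -> None:
        stream_id = reserve_stream_id()             # nextval() on the postgres sequence
        self.highest_reserved = max(self.highest_reserved, stream_id)
        self.awaiting_completion.add(stream_id)
        insert_fact_row(stream_id, fact)            # insert into the backing table, in a transaction
        self.complete(stream_id)

    def complete(self, stream_id: int) -> None:
        self.awaiting_completion.discard(stream_id)
        old = self.current_position
        # Advance only while no earlier fact is still awaiting completion.
        while (self.current_position < self.highest_reserved
               and self.current_position + 1 not in self.awaiting_completion):
            self.current_position += 1
        for completed_id in range(old + 1, self.current_position + 1):
            emit_rdata(completed_id)                # one RDATA per newly completed fact
```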
### Subscribing to streams

Readers need to track the current position of every writer.

At startup, they can find this by contacting each writer with a `REPLICATE` message,
requesting that all writers reply describing their current position in their streams.
Writers reply with a `POSITION` message.

To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.
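A matching sketch of the reader side, again with hypothetical names (`send_replicate`, `fetch_fact`, `process`) standing in for the replication transport and the stream-specific handling:

```python
# Hypothetical sketch of a stream reader; the helper names are illustrative.
class StreamReader:
    def __init__(self) -> None:
        # Per-writer view of the stream (option 1 above).
        self.positions: dict[str, int] = {}

    def start(self) -> None:
        send_replicate()  # ask every writer to announce its current position

    def on_position(self, writer: str, stream_id: int) -> None:
        self.positions[writer] = stream_id

    def on_rdata(self, writer: str, stream_id: int) -> None:
        # The RDATA is only a notification: fetch the full fact from the
        # stream's backing table before responding to it.
        fact = fetch_fact(stream_id)
        process(fact)
        self.positions[writer] = stream_id  # advance our record of this writer
```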
## Summary

In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.

---

[^1]: We use the word _fact_ here for two reasons.
    Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
    Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.

[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
    In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.

[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
    nowadays it's done via Redis's Pubsub.
@@ -95,7 +95,7 @@ matrix.example.com {
 }

 example.com:8448 {
-    reverse_proxy localhost:8008
+    reverse_proxy /_matrix/* localhost:8008
 }
 ```
@@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi

 #### ArchLinux

-The quickest way to get up and running with ArchLinux is probably with the community package
-<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
+The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
+<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
 the necessary dependencies.

 pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ):
@@ -3,7 +3,7 @@
 A structured logging system can be useful when your logs are destined for a
 machine to parse and process. By maintaining its machine-readable characteristics,
 it enables more efficient searching and aggregations when consumed by software
-such as the "ELK stack".
+such as the [ELK stack](https://opensource.com/article/18/9/open-source-log-aggregation-tools).

 Synapse's structured logging system is configured via the file that Synapse's
 `log_config` config option points to. The file should include a formatter which
@@ -88,6 +88,21 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.90.0
+
+## App service query parameter authorization is now a configuration option
+
+Synapse v1.81.0 deprecated application service authorization via query parameters as this is
+considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
+[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
+query parameter authorization redundant. Since removing the ability to continue to use query parameters could break
+backwards compatibility it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
+This option defaults to false, but can be activated by adding
+```yaml
+use_appservice_legacy_authorization: true
+```
+to your configuration.
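For illustration, the difference between the two authorization styles looks roughly like this; the application service URL, token and transaction ID below are placeholders:

```python
# Placeholder values throughout; this only illustrates where the token goes.
import requests

AS_URL = "https://appservice.example.com"
HS_TOKEN = "<hs_token>"
TXN_ID = "1234"

# Spec-compliant style (sent by Synapse v1.71.0 and later):
requests.put(
    f"{AS_URL}/_matrix/app/v1/transactions/{TXN_ID}",
    headers={"Authorization": f"Bearer {HS_TOKEN}"},
    json={"events": []},
)

# Legacy style, only re-enabled by `use_appservice_legacy_authorization`.
# Insecure: the token ends up in URLs, and therefore in logs and proxies.
requests.put(
    f"{AS_URL}/_matrix/app/v1/transactions/{TXN_ID}",
    params={"access_token": HS_TOKEN},
    json={"events": []},
)
```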
# Upgrading to v1.89.0

## Removal of unspecced `user` property for `/register`

@@ -97,7 +112,6 @@ The standard `username` property should be used instead. See the
 [Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
 for more information.

-
 # Upgrading to v1.88.0

 ## Minimum supported Python version
@@ -1,5 +1,7 @@
 # Registration Tokens

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 This API allows you to manage tokens which can be used to authenticate
 registration requests, as proposed in
 [MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
@@ -1242,6 +1242,14 @@ like sending a federation transaction.
 * `max_short_retries`: maximum number of retries for the short retry algo. Default to 3 attempts.
 * `max_long_retries`: maximum number of retries for the long retry algo. Default to 10 attempts.

+The following options control the retry logic when communicating with a specific homeserver destination.
+Unlike the previous configuration options, these values apply across all requests
+for a given destination and the state of the backoff is stored in the database.
+
+* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
+* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2.
+* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.
+
 Example configuration:
 ```yaml
 federation:
@@ -1250,6 +1258,9 @@ federation:
   max_long_retry_delay: 100s
   max_short_retries: 5
   max_long_retries: 20
+  destination_min_retry_interval: 30s
+  destination_retry_multiplier: 5
+  destination_max_retry_interval: 12h
 ```
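As a rough illustration of how these three options interact (this is not Synapse's actual retry code), each failure multiplies the previous interval until the cap is reached:

```python
# Illustrative only: the per-destination backoff schedule implied by the
# destination_* options above.
def backoff_schedule(min_interval_s: float, multiplier: float,
                     max_interval_s: float, failures: int) -> list[float]:
    intervals = []
    interval = min_interval_s
    for _ in range(failures):
        intervals.append(min(interval, max_interval_s))
        interval *= multiplier
    return intervals

# With the example configuration above (30s, x5, capped at 12h):
print(backoff_schedule(30, 5, 12 * 3600, 6))
# [30, 150, 750, 3750, 18750, 43200]
```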
---
## Caching

@@ -2837,6 +2848,20 @@ Example configuration:
 ```yaml
 track_appservice_user_ips: true
 ```
 ---
+### `use_appservice_legacy_authorization`
+
+Whether to send the application service access tokens via the `access_token` query parameter
+per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
+access tokens via a query parameter.
+
+**Enabling this option is considered insecure and is not recommended.**
+
+Example configuration:
+```yaml
+use_appservice_legacy_authorization: true
+```
+
 ---
 ### `macaroon_secret_key`
@@ -3000,6 +3025,16 @@ enable SAML login. You can either put your entire pysaml config inline using the
 option, or you can specify a path to a pysaml2 config file with the sub-option `config_path`.
 This setting has the following sub-options:

+* `idp_name`: A user-facing name for this identity provider, which is used to
+  offer the user a choice of login mechanisms.
+* `idp_icon`: An optional icon for this identity provider, which is presented
+  by clients and Synapse's own IdP picker page. If given, must be an
+  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
+  obtain such an MXC URI is to upload an image to an (unencrypted) room
+  and then copy the "url" from the source of the event.)
+* `idp_brand`: An optional brand for this identity provider, allowing clients
+  to style the login flow according to the identity provider in question.
+  See the [spec](https://spec.matrix.org/latest/) for possible options here.
 * `sp_config`: the configuration for the pysaml2 Service Provider. See pysaml2 docs for format of config.
   Default values will be used for the `entityid` and `service` settings,
   so it is not normally necessary to specify them unless you need to
@@ -3151,7 +3186,7 @@ Options for each entry include:

 * `idp_icon`: An optional icon for this identity provider, which is presented
   by clients and Synapse's own IdP picker page. If given, must be an
-  MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
+  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
   obtain such an MXC URI is to upload an image to an (unencrypted) room
   and then copy the "url" from the source of the event.)
@@ -3169,6 +3204,14 @@ Options for each entry include:

 * `client_secret`: oauth2 client secret to use. May be omitted if
   `client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
+  Must be omitted if `client_secret_path` is specified.
+
+* `client_secret_path`: path to the oauth2 client secret to use. With that
+  it's not necessary to leak secrets into the config file itself.
+  Mutually exclusive with `client_secret`. Can be omitted if
+  `client_secret_jwt_key` is specified.
+
+  *Added in Synapse 1.91.0.*

 * `client_secret_jwt_key`: Alternative to client_secret: details of a key used
   to create a JSON Web Token to be used as an OAuth2 client secret. If
@@ -3366,7 +3409,18 @@ Enable Central Authentication Service (CAS) for registration and login.
 Has the following sub-options:
 * `enabled`: Set this to true to enable authorization against a CAS server.
   Defaults to false.
+* `idp_name`: A user-facing name for this identity provider, which is used to
+  offer the user a choice of login mechanisms.
+* `idp_icon`: An optional icon for this identity provider, which is presented
+  by clients and Synapse's own IdP picker page. If given, must be an
+  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
+  obtain such an MXC URI is to upload an image to an (unencrypted) room
+  and then copy the "url" from the source of the event.)
+* `idp_brand`: An optional brand for this identity provider, allowing clients
+  to style the login flow according to the identity provider in question.
+  See the [spec](https://spec.matrix.org/latest/) for possible options here.
 * `server_url`: The URL of the CAS authorization endpoint.
+* `protocol_version`: The CAS protocol version, defaults to none (version 3 is required if you want to use "required_attributes").
 * `displayname_attribute`: The attribute of the CAS response to use as the display name.
   If no name is given here, no displayname will be set.
 * `required_attributes`: It is possible to configure Synapse to only allow logins if CAS attributes

@@ -3380,6 +3434,7 @@ Example configuration:
 cas_config:
   enabled: true
   server_url: "https://cas-server.com"
+  protocol_version: 3
   displayname_attribute: name
   required_attributes:
     userGroup: "staff"
@@ -3606,6 +3661,7 @@ This option has the following sub-options:
 * `prefer_local_users`: Defines whether to prefer local users in search query results.
   If set to true, local users are more likely to appear above remote users when searching the
   user directory. Defaults to false.
+* `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false.

 Example configuration:
 ```yaml
@@ -3613,6 +3669,7 @@ user_directory:
   enabled: false
   search_all_users: true
   prefer_local_users: true
+  show_locked_users: true
 ```
 ---
 ### `user_consent`
@@ -3810,6 +3867,19 @@ Example configuration:
 ```yaml
 forget_rooms_on_leave: false
 ```
 ---
+### `exclude_rooms_from_sync`
+A list of rooms to exclude from sync responses. This is useful for server
+administrators wishing to group users into a room without these users being able
+to see it from their client.
+
+By default, no room is excluded.
+
+Example configuration:
+```yaml
+exclude_rooms_from_sync:
+  - "!foo:example.com"
+```
+
 ---
 ## Opentracing
flake.lock (generated, 62 lines)
@@ -8,16 +8,16 @@
       "pre-commit-hooks": "pre-commit-hooks"
     },
     "locked": {
-      "lastModified": 1683102061,
-      "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
+      "lastModified": 1688058187,
+      "narHash": "sha256-ipDcc7qrucpJ0+0eYNlwnE+ISTcq4m03qW+CWUshRXI=",
       "owner": "cachix",
       "repo": "devenv",
-      "rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
+      "rev": "c8778e3dc30eb9043e218aaa3861d42d4992de77",
       "type": "github"
     },
     "original": {
       "owner": "cachix",
-      "ref": "main",
+      "ref": "v0.6.3",
       "repo": "devenv",
       "type": "github"
     }

@@ -39,12 +39,15 @@
     }
   },
   "flake-utils": {
+    "inputs": {
+      "systems": "systems"
+    },
     "locked": {
-      "lastModified": 1667395993,
-      "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
+      "lastModified": 1685518550,
+      "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
+      "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
       "type": "github"
     },
     "original": {

@@ -55,7 +58,7 @@
   },
   "flake-utils_2": {
     "inputs": {
-      "systems": "systems"
+      "systems": "systems_2"
     },
     "locked": {
       "lastModified": 1681202837,

@@ -167,27 +170,27 @@
   },
   "nixpkgs-stable": {
     "locked": {
-      "lastModified": 1673800717,
-      "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
+      "lastModified": 1685801374,
+      "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
+      "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
       "type": "github"
     },
     "original": {
       "owner": "NixOS",
-      "ref": "nixos-22.11",
+      "ref": "nixos-23.05",
       "repo": "nixpkgs",
       "type": "github"
     }
   },
   "nixpkgs_2": {
     "locked": {
-      "lastModified": 1682519441,
-      "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
+      "lastModified": 1690535733,
+      "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
+      "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
       "type": "github"
     },
     "original": {

@@ -228,11 +231,11 @@
       "nixpkgs-stable": "nixpkgs-stable"
     },
     "locked": {
-      "lastModified": 1678376203,
-      "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
+      "lastModified": 1688056373,
+      "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
       "owner": "cachix",
       "repo": "pre-commit-hooks.nix",
-      "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
+      "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
       "type": "github"
     },
     "original": {

@@ -246,7 +249,7 @@
       "devenv": "devenv",
       "nixpkgs": "nixpkgs_2",
       "rust-overlay": "rust-overlay",
-      "systems": "systems_2"
+      "systems": "systems_3"
     }
   },
   "rust-overlay": {

@@ -255,11 +258,11 @@
       "nixpkgs": "nixpkgs_3"
     },
     "locked": {
-      "lastModified": 1689302058,
-      "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=",
+      "lastModified": 1690510705,
+      "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8",
+      "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea",
       "type": "github"
     },
     "original": {

@@ -297,6 +300,21 @@
       "repo": "default",
       "type": "github"
     }
   },
+  "systems_3": {
+    "locked": {
+      "lastModified": 1681028828,
+      "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+      "owner": "nix-systems",
+      "repo": "default",
+      "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+      "type": "github"
+    },
+    "original": {
+      "owner": "nix-systems",
+      "repo": "default",
+      "type": "github"
+    }
+  }
 },
 "root": "root",
@@ -39,13 +39,13 @@

 {
   inputs = {
-    # Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
-    # does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
+    # Use the master/unstable branch of nixpkgs. Used to fetch the latest
+    # available versions of packages.
     nixpkgs.url = "github:NixOS/nixpkgs/master";
     # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
     systems.url = "github:nix-systems/default";
     # A development environment manager built on Nix. See https://devenv.sh.
-    devenv.url = "github:cachix/devenv/main";
+    devenv.url = "github:cachix/devenv/v0.6.3";
     # Rust toolchain.
     rust-overlay.url = "github:oxalica/rust-overlay";
   };
mypy.ini (16 lines)
@@ -45,6 +45,13 @@ warn_unused_ignores = False
 disallow_untyped_defs = False
 disallow_incomplete_defs = False

+[mypy-synapse.util.manhole]
+# This module imports something from Twisted which has a bad annotation in Twisted trunk,
+# but is unannotated in Twisted's latest release. We want to type-ignore the problem
+# in the twisted trunk job, even though it has no effect on normal mypy runs.
+warn_unused_ignores = False
+

 ;; Dependencies without annotations
 ;; Before ignoring a module, check to see if type stubs are available.
 ;; The `typeshed` project maintains stubs here:

@@ -80,18 +87,9 @@ ignore_missing_imports = True
 [mypy-saml2.*]
 ignore_missing_imports = True

-[mypy-service_identity.*]
-ignore_missing_imports = True
-
 [mypy-srvlookup.*]
 ignore_missing_imports = True

-# https://github.com/twisted/treq/pull/366
-[mypy-treq.*]
-ignore_missing_imports = True
-
-[mypy-incremental.*]
-ignore_missing_imports = True

 [mypy-setuptools_rust.*]
 ignore_missing_imports = True
poetry.lock (generated, 810 lines): file diff suppressed because it is too large.
@@ -35,7 +35,7 @@
 showcontent = true

 [tool.black]
-target-version = ['py37', 'py38', 'py39', 'py310']
+target-version = ['py38', 'py39', 'py310', 'py311']
 # black ignores everything in .gitignore by default, see
 # https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
 # Use `extend-exclude` if you want to exclude something in addition to this.

@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.88.0"
+version = "1.92.1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

@@ -306,10 +306,13 @@ all = [
 ]

 [tool.poetry.dev-dependencies]
-# We pin black so that our tests don't start failing on new releases.
+# We pin development dependencies in poetry.lock so that our tests don't start
+# failing on new releases. Keeping lower bounds loose here means that dependabot
+# can bump versions without having to update the content-hash in the lockfile.
+# This helps prevent merge conflicts when running a batch of dependabot updates.
 isort = ">=5.10.1"
-black = ">=22.3.0"
-ruff = "0.0.277"
+black = ">=22.7.0"
+ruff = "0.0.286"

 # Typechecking
 lxml-stubs = ">=0.4.0"

@@ -367,7 +370,7 @@ furo = ">=2022.12.7,<2024.0.0"
 # system changes.
 # We are happy to raise these upper bounds upon request,
 # provided we check that it's safe to do so (i.e. that CI passes).
-requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
+requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.6.0"]
 build-backend = "poetry.core.masonry.api"
@@ -13,6 +13,9 @@
 // limitations under the License.

 #![feature(test)]
+
+use std::borrow::Cow;
+
 use synapse::push::{
     evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
     PushRules, SimpleJsonValue,

@@ -26,15 +29,15 @@ fn bench_match_exact(b: &mut Bencher) {
     let flattened_keys = [
         (
             "type".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
         ),
         (
             "room_id".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
         ),
         (
             "content.body".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
         ),
     ]
     .into_iter()

@@ -71,15 +74,15 @@ fn bench_match_word(b: &mut Bencher) {
     let flattened_keys = [
         (
             "type".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
         ),
         (
             "room_id".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
         ),
         (
             "content.body".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
         ),
     ]
     .into_iter()

@@ -116,15 +119,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
     let flattened_keys = [
         (
             "type".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
         ),
         (
             "room_id".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
         ),
         (
             "content.body".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
         ),
     ]
     .into_iter()

@@ -161,15 +164,15 @@ fn bench_eval_message(b: &mut Bencher) {
     let flattened_keys = [
         (
             "type".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
         ),
         (
             "room_id".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
         ),
         (
             "content.body".to_string(),
-            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
         ),
     ]
     .into_iter()

@@ -194,7 +197,6 @@ fn bench_eval_message(b: &mut Bencher) {
         false,
         false,
         false,
-        false,
     );

     b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
@@ -63,22 +63,6 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
 }];

 pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
-    // We don't want to notify on edits. Not only can this be confusing in real
-    // time (2 notifications, one message) but it's especially confusing
-    // if a bridge needs to edit a previously backfilled message.
-    PushRule {
-        rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
-        priority_class: 5,
-        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
-            EventMatchCondition {
-                key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
-                pattern: Cow::Borrowed("m.replace"),
-            },
-        ))]),
-        actions: Cow::Borrowed(&[]),
-        default: true,
-        default_enabled: true,
-    },
     PushRule {
         rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
         priority_class: 5,

@@ -146,7 +130,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[Condition::Known(
             KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
-                key: Cow::Borrowed("content.m\\.mentions.user_ids"),
+                key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
                 value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
             }),
         )]),

@@ -167,8 +151,8 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[
             Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
-                key: Cow::Borrowed("content.m\\.mentions.room"),
-                value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
+                key: Cow::Borrowed(r"content.m\.mentions.room"),
+                value: Cow::Owned(SimpleJsonValue::Bool(true)),
             })),
             Condition::Known(KnownCondition::SenderNotificationPermission {
                 key: Cow::Borrowed("room"),

@@ -241,6 +225,21 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+    // We don't want to notify on edits *unless* the edit directly mentions a
+    // user, which is handled above.
+    PushRule {
+        rule_id: Cow::Borrowed("global/override/.m.rule.suppress_edits"),
+        priority_class: 5,
+        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
+            EventPropertyIsCondition {
+                key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
+                value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
+            },
+        ))]),
+        actions: Cow::Borrowed(&[]),
+        default: true,
+        default_enabled: true,
+    },
     PushRule {
         rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
         priority_class: 5,
@@ -117,7 +117,7 @@ impl PushRuleEvaluator {
         msc3931_enabled: bool,
     ) -> Result<Self, Error> {
         let body = match flattened_keys.get("content.body") {
-            Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
+            Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
             _ => String::new(),
         };

@@ -313,13 +313,15 @@ impl PushRuleEvaluator {
                 };

                 let pattern = match &*exact_event_match.value_type {
-                    EventMatchPatternType::UserId => user_id,
-                    EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
+                    EventMatchPatternType::UserId => user_id.to_owned(),
+                    EventMatchPatternType::UserLocalpart => {
+                        get_localpart_from_id(user_id)?.to_owned()
+                    }
                 };

                 self.match_event_property_contains(
                     exact_event_match.key.clone(),
-                    Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
+                    Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
                 )?
             }
             KnownCondition::ContainsDisplayName => {

@@ -494,7 +496,7 @@ fn push_rule_evaluator() {
     let mut flattened_keys = BTreeMap::new();
     flattened_keys.insert(
         "content.body".to_string(),
-        JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
+        JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
     );
     let evaluator = PushRuleEvaluator::py_new(
         flattened_keys,

@@ -522,7 +524,7 @@ fn test_requires_room_version_supports_condition() {
     let mut flattened_keys = BTreeMap::new();
     flattened_keys.insert(
         "content.body".to_string(),
-        JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
+        JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
     );
     let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
     let evaluator = PushRuleEvaluator::py_new(

@@ -562,7 +564,7 @@ fn test_requires_room_version_supports_condition() {
     };
     let rules = PushRules::new(vec![custom_rule]);
     result = evaluator.run(
-        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
+        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true),
         None,
         None,
     );
@@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
 #[serde(untagged)]
 pub enum SimpleJsonValue {
-    Str(String),
+    Str(Cow<'static, str>),
     Int(i64),
     Bool(bool),
     Null,

@@ -265,7 +265,7 @@ pub enum SimpleJsonValue {
 impl<'source> FromPyObject<'source> for SimpleJsonValue {
     fn extract(ob: &'source PyAny) -> PyResult<Self> {
         if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
-            Ok(SimpleJsonValue::Str(s.to_string()))
+            Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
         // A bool *is* an int, ensure we try bool first.
         } else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
             Ok(SimpleJsonValue::Bool(b.extract()?))

@@ -527,7 +527,6 @@ pub struct FilteredPushRules {
     msc1767_enabled: bool,
     msc3381_polls_enabled: bool,
     msc3664_enabled: bool,
-    msc3958_suppress_edits_enabled: bool,
 }

 #[pymethods]

@@ -539,7 +538,6 @@ impl FilteredPushRules {
         msc1767_enabled: bool,
         msc3381_polls_enabled: bool,
         msc3664_enabled: bool,
-        msc3958_suppress_edits_enabled: bool,
     ) -> Self {
         Self {
             push_rules,

@@ -547,7 +545,6 @@ impl FilteredPushRules {
             msc1767_enabled,
             msc3381_polls_enabled,
             msc3664_enabled,
-            msc3958_suppress_edits_enabled,
         }
     }

@@ -584,12 +581,6 @@ impl FilteredPushRules {
                     return false;
                 }

-                if !self.msc3958_suppress_edits_enabled
-                    && rule.rule_id == "global/override/.com.beeper.suppress_edits"
-                {
-                    return false;
-                }
-
                 true
             })
             .map(|r| {
@@ -32,8 +32,8 @@ DISTS = (
     "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
     "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
-    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
+    "debian:trixie",  # (EOL not specified yet)
 )

 DESC = """\

@@ -46,7 +46,7 @@ can be passed on the commandline for debugging.
 projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


-class Builder(object):
+class Builder:
     def __init__(
         self,
         redirect_stdout: bool = False,
@@ -43,7 +43,7 @@ def main(force_colors: bool) -> None:
     diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)

     # Get the schema version of the local file to check against current schema on develop
-    with open("synapse/storage/schema/__init__.py", "r") as file:
+    with open("synapse/storage/schema/__init__.py") as file:
         local_schema = file.read()
     new_locals: Dict[str, Any] = {}
     exec(local_schema, new_locals)
@@ -247,7 +247,7 @@ def main() -> None:


 def read_args_from_config(args: argparse.Namespace) -> None:
-    with open(args.config, "r") as fh:
+    with open(args.config) as fh:
         config = yaml.safe_load(fh)

     if not args.server_name:
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -245,11 +244,17 @@ def _prepare() -> None:
     else:
         debian_version = new_version

-    run_until_successful(
-        f'dch -M -v {debian_version} "New Synapse release {new_version}."',
-        shell=True,
-    )
-    run_until_successful('dch -M -r -D stable ""', shell=True)
+    if sys.platform == "darwin":
+        run_until_successful(
+            f"docker run --rm -v .:/synapse ubuntu:latest /synapse/scripts-dev/docker_update_debian_changelog.sh {new_version}",
+            shell=True,
+        )
+    else:
+        run_until_successful(
+            f'dch -M -v {debian_version} "New Synapse release {new_version}."',
+            shell=True,
+        )
+        run_until_successful('dch -M -r -D stable ""', shell=True)

     # Show the user the changes and ask if they want to edit the change log.
     synapse_repo.git.add("-u")
@@ -567,19 +572,27 @@ def _notify(message: str) -> None:
     # for this.
     click.echo(f"\a{message}")

+    app_name = "Synapse Release Script"
+
     # Try and run notify-send, but don't raise an Exception if this fails
     # (This is best-effort)
     # TODO Support other platforms?
-    subprocess.run(
-        [
-            "notify-send",
-            "--app-name",
-            "Synapse Release Script",
-            "--expire-time",
-            "3600000",
-            message,
-        ]
-    )
+    if sys.platform == "darwin":
+        # See https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide/reference/ASLR_cmds.html#//apple_ref/doc/uid/TP40000983-CH216-SW224
+        subprocess.run(
+            f"""osascript -e 'display notification "{message}" with title "{app_name}"'""",
+            shell=True,
+        )
+    else:
+        subprocess.run(
+            [
+                "notify-send",
+                "--app-name",
+                app_name,
+                "--expire-time",
+                "3600000",
+                message,
+            ]
+        )


 @cli.command()
@@ -145,7 +145,7 @@ Example usage:


 def read_args_from_config(args: argparse.Namespace) -> None:
-    with open(args.config, "r") as fh:
+    with open(args.config) as fh:
         config = yaml.safe_load(fh)
     if not args.server_name:
         args.server_name = config["server_name"]
@@ -46,7 +46,6 @@ class FilteredPushRules:
         msc1767_enabled: bool,
         msc3381_polls_enabled: bool,
         msc3664_enabled: bool,
-        msc3958_suppress_edits_enabled: bool,
     ): ...
     def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@@ -21,11 +21,20 @@ import os
 import sys
 from typing import Any, Dict

+from PIL import ImageFile
+
 from synapse.util.rust import check_rust_lib_up_to_date
 from synapse.util.stringutils import strtobool

+# Allow truncated JPEG images to be thumbnailed.
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+
 # Check that we're not running on an unsupported Python version.
-if sys.version_info < (3, 8):
+#
+# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
+# if-statement completely.
+py_version = sys.version_info
+if py_version < (3, 8):
     print("Synapse requires Python 3.8 or above.")
     sys.exit(1)

@@ -78,7 +87,7 @@ try:
 except ImportError:
     pass

-import synapse.util
+import synapse.util  # noqa: E402

 __version__ = synapse.util.SYNAPSE_VERSION
@@ -123,7 +123,7 @@ BOOLEAN_COLUMNS = {
     "redactions": ["have_censored"],
     "room_stats_state": ["is_federatable"],
     "rooms": ["is_public", "has_auth_chain_index"],
-    "users": ["shadow_banned", "approved"],
+    "users": ["shadow_banned", "approved", "locked"],
     "un_partial_stated_event_stream": ["rejection_status_changed"],
     "users_who_share_rooms": ["share_private"],
     "per_user_experimental_features": ["enabled"],

@@ -482,7 +482,10 @@ class Porter:
                     do_backward[0] = False

                 if forward_rows or backward_rows:
-                    headers = [column[0] for column in txn.description]
+                    assert txn.description is not None
+                    headers: Optional[List[str]] = [
+                        column[0] for column in txn.description
+                    ]
                 else:
                     headers = None

@@ -544,6 +547,7 @@ class Porter:
             def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
                 txn.execute(select, (forward_chunk, self.batch_size))
                 rows = txn.fetchall()
+                assert txn.description is not None
                 headers = [column[0] for column in txn.description]

                 return headers, rows

@@ -761,7 +765,7 @@ class Porter:

         # Step 2. Set up sequences
         #
-        # We do this before porting the tables so that event if we fail half
+        # We do this before porting the tables so that even if we fail half
         # way through the postgres DB always have sequences that are greater
         # than their respective tables. If we don't then creating the
         # `DataStore` object will fail due to the inconsistency.

@@ -769,6 +773,10 @@ class Porter:
         await self._setup_state_group_id_seq()
         await self._setup_user_id_seq()
         await self._setup_events_stream_seqs()
+        await self._setup_sequence(
+            "un_partial_stated_event_stream_sequence",
+            ("un_partial_stated_event_stream",),
+        )
         await self._setup_sequence(
             "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
         )

@@ -779,6 +787,11 @@ class Porter:
         await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
         await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
         await self._setup_auth_chain_sequence()
+        await self._setup_sequence(
+            "application_services_txn_id_seq",
+            ("application_services_txns",),
+            "txn_id",
+        )

         # Step 3. Get tables.
         self.progress.set_state("Fetching tables")

@@ -910,7 +923,8 @@ class Porter:
         def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
             txn.execute(select)
             rows = txn.fetchall()
-            headers: List[str] = [column[0] for column in txn.description]
+            assert txn.description is not None
+            headers = [column[0] for column in txn.description]

             ts_ind = headers.index("ts")

@@ -1083,7 +1097,10 @@ class Porter:
         )

     async def _setup_sequence(
-        self, sequence_name: str, stream_id_tables: Iterable[str]
+        self,
+        sequence_name: str,
+        stream_id_tables: Iterable[str],
+        column_name: str = "stream_id",
     ) -> None:
         """Set a sequence to the correct value."""
         current_stream_ids = []

@@ -1093,7 +1110,7 @@ class Porter:
                 await self.sqlite_store.db_pool.simple_select_one_onecol(
                     table=stream_id_table,
                     keyvalues={},
-                    retcol="COALESCE(MAX(stream_id), 1)",
+                    retcol=f"COALESCE(MAX({column_name}), 1)",
                     allow_none=True,
                 ),
             )

@@ -1193,10 +1210,10 @@ class CursesProgress(Progress):
         self.total_processed = 0
         self.total_remaining = 0

-        super(CursesProgress, self).__init__()
+        super().__init__()

     def update(self, table: str, num_done: int) -> None:
-        super(CursesProgress, self).update(table, num_done)
+        super().update(table, num_done)

         self.total_processed = 0
         self.total_remaining = 0

@@ -1292,7 +1309,7 @@ class TerminalProgress(Progress):
     """Just prints progress to the terminal"""

     def update(self, table: str, num_done: int) -> None:
-        super(TerminalProgress, self).update(table, num_done)
+        super().update(table, num_done)

         data = self.tables[table]
@@ -38,7 +38,7 @@ class MockHomeserver(HomeServer):
     DATASTORE_CLASS = DataStore  # type: ignore [assignment]

     def __init__(self, config: HomeServerConfig):
-        super(MockHomeserver, self).__init__(
+        super().__init__(
             hostname=config.server.server_name,
             config=config,
             reactor=reactor,
@@ -60,6 +60,7 @@ class Auth(Protocol):
         request: SynapseRequest,
         allow_guest: bool = False,
         allow_expired: bool = False,
+        allow_locked: bool = False,
     ) -> Requester:
         """Get a registered user's ID.
@@ -58,6 +58,7 @@ class InternalAuth(BaseAuth):
         request: SynapseRequest,
         allow_guest: bool = False,
         allow_expired: bool = False,
+        allow_locked: bool = False,
     ) -> Requester:
         """Get a registered user's ID.

@@ -79,7 +80,7 @@ class InternalAuth(BaseAuth):
         parent_span = active_span()
         with start_active_span("get_user_by_req"):
             requester = await self._wrapped_get_user_by_req(
-                request, allow_guest, allow_expired
+                request, allow_guest, allow_expired, allow_locked
             )

             if parent_span:

@@ -107,6 +108,7 @@ class InternalAuth(BaseAuth):
         request: SynapseRequest,
         allow_guest: bool,
         allow_expired: bool,
+        allow_locked: bool,
     ) -> Requester:
         """Helper for get_user_by_req

@@ -126,6 +128,17 @@ class InternalAuth(BaseAuth):
                 access_token, allow_expired=allow_expired
             )

+            # Deny the request if the user account is locked.
+            if not allow_locked and await self.store.get_user_locked_status(
+                requester.user.to_string()
+            ):
+                raise AuthError(
+                    401,
+                    "User account has been locked",
+                    errcode=Codes.USER_LOCKED,
+                    additional_fields={"soft_logout": True},
+                )
+
             # Deny the request if the user account has expired.
             # This check is only done for regular users, not appservice ones.
             if not allow_expired:
@@ -20,6 +20,7 @@ from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret
 from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign
 from authlib.oauth2.rfc7662 import IntrospectionToken
 from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
+from prometheus_client import Histogram

 from twisted.web.client import readBody
 from twisted.web.http_headers import Headers

@@ -44,6 +45,13 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+introspection_response_timer = Histogram(
+    "synapse_api_auth_delegated_introspection_response",
+    "Time taken to get a response for an introspection request",
+    ["code"],
+)
+
+
 # Scope as defined by MSC2967
 # https://github.com/matrix-org/matrix-spec-proposals/pull/2967
 SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"

@@ -99,6 +107,7 @@ class MSC3861DelegatedAuth(BaseAuth):
         assert self._config.client_id, "No client_id provided"
         assert auth_method is not None, "Invalid client_auth_method provided"

+        self._clock = hs.get_clock()
         self._http_client = hs.get_proxied_http_client()
         self._hostname = hs.hostname
         self._admin_token = self._config.admin_token

@@ -163,14 +172,26 @@ class MSC3861DelegatedAuth(BaseAuth):
         # Do the actual request
         # We're not using the SimpleHttpClient util methods as we don't want to
         # check the HTTP status code, and we do the body encoding ourselves.
-        response = await self._http_client.request(
-            method="POST",
-            uri=uri,
-            data=body.encode("utf-8"),
-            headers=headers,
-        )
-
-        resp_body = await make_deferred_yieldable(readBody(response))
+
+        start_time = self._clock.time()
+        try:
+            response = await self._http_client.request(
+                method="POST",
+                uri=uri,
+                data=body.encode("utf-8"),
+                headers=headers,
+            )
+
+            resp_body = await make_deferred_yieldable(readBody(response))
+        except Exception:
+            end_time = self._clock.time()
+            introspection_response_timer.labels("ERR").observe(end_time - start_time)
+            raise
+
+        end_time = self._clock.time()
+        introspection_response_timer.labels(response.code).observe(
+            end_time - start_time
+        )

         if response.code < 200 or response.code >= 300:
             raise HttpResponseException(
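The hunk above brackets the single awaited introspection request with two clock reads and records the elapsed time in a Histogram labelled by response code, with failures recorded under the sentinel label "ERR". A minimal standalone sketch of the same labelled-timer pattern (all names here are illustrative, not Synapse's):

    import time

    from prometheus_client import Histogram

    # One Histogram, partitioned by outcome via the "code" label.
    request_timer = Histogram(
        "example_request_seconds",
        "Time taken to get a response",
        ["code"],
    )

    def timed_request(do_request):
        """Run do_request(), recording its duration under the response-code label."""
        start = time.monotonic()
        try:
            response = do_request()
        except Exception:
            # Failures get a sentinel label so they remain visible in metrics.
            request_timer.labels("ERR").observe(time.monotonic() - start)
            raise
        request_timer.labels(str(response.code)).observe(time.monotonic() - start)
        return response

Labelling by status code keeps one metric per outcome rather than one shared distribution, so slow failures cannot hide inside fast successes.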
@@ -196,6 +217,7 @@ class MSC3861DelegatedAuth(BaseAuth):
         request: SynapseRequest,
         allow_guest: bool = False,
         allow_expired: bool = False,
+        allow_locked: bool = False,
     ) -> Requester:
         access_token = self.get_access_token_from_request(request)

@@ -18,8 +18,7 @@
 """Contains constants from the specification."""

 import enum
-
-from typing_extensions import Final
+from typing import Final

 # the max size of a (canonical-json-encoded) event
 MAX_PDU_SIZE = 65536

@@ -16,6 +16,7 @@
 """Contains exceptions and error codes."""

 import logging
+import math
 import typing
 from enum import Enum
 from http import HTTPStatus

@@ -80,6 +81,8 @@ class Codes(str, Enum):
     WEAK_PASSWORD = "M_WEAK_PASSWORD"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
+    # USER_LOCKED = "M_USER_LOCKED"
+    USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"

     # Part of MSC3848
     # https://github.com/matrix-org/matrix-spec-proposals/pull/3848

@@ -208,6 +211,11 @@ class SynapseError(CodeMessageException):
     def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, **self._additional_fields)

+    @property
+    def debug_context(self) -> Optional[str]:
+        """Override this to add debugging context that shouldn't be sent to clients."""
+        return None
+

 class InvalidAPICallError(SynapseError):
     """You called an existing API endpoint, but fed that endpoint

@@ -501,19 +509,31 @@ class InvalidCaptchaError(SynapseError):
 class LimitExceededError(SynapseError):
     """A client has sent too many requests and is being throttled."""

+    include_retry_after_header = False
+
     def __init__(
         self,
+        limiter_name: str,
         code: int = 429,
-        msg: str = "Too Many Requests",
         retry_after_ms: Optional[int] = None,
         errcode: str = Codes.LIMIT_EXCEEDED,
     ):
-        super().__init__(code, msg, errcode)
+        headers = (
+            {"Retry-After": str(math.ceil(retry_after_ms / 1000))}
+            if self.include_retry_after_header and retry_after_ms is not None
+            else None
+        )
+        super().__init__(code, "Too Many Requests", errcode, headers=headers)
         self.retry_after_ms = retry_after_ms
+        self.limiter_name = limiter_name

     def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
         return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)

+    @property
+    def debug_context(self) -> Optional[str]:
+        return self.limiter_name
+

 class RoomKeysVersionError(SynapseError):
     """A client has tried to upload to a non-current version of the room_keys store"""
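When `include_retry_after_header` is flipped on (via the MSC4041 experimental flag that appears further down), `LimitExceededError` derives a standard `Retry-After` header from the millisecond retry interval, rounding up so a client can never legally retry early. The rounding step in isolation:

    import math

    def retry_after_seconds(retry_after_ms: int) -> str:
        # Round up: a 1200 ms wait must advertise 2 s, not 1 s.
        return str(math.ceil(retry_after_ms / 1000))

    assert retry_after_seconds(1200) == "2"
    assert retry_after_seconds(1000) == "1"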
@@ -40,7 +40,7 @@ class Ratelimiter:
    - the cost C of this request in tokens.

    Then, if there is room in the bucket for C tokens (T + C <= `burst_count`),
    the request is permitted and `cost` tokens are added to the bucket.
-   Otherwise the request is denied, and the bucket continues to hold T tokens.
+   Otherwise, the request is denied, and the bucket continues to hold T tokens.

    This means that the limiter enforces an average request frequency of `rate_hz`,
    while accumulating a buffer of up to `burst_count` requests which can be consumed

@@ -55,18 +55,23 @@ class Ratelimiter:
    request.

    Args:
        store: The datastore providing get_ratelimit_for_user.
        clock: A homeserver clock, for retrieving the current time
-       rate_hz: The long term number of actions that can be performed in a second.
-       burst_count: How many actions that can be performed before being limited.
+       cfg: The ratelimit configuration for this rate limiter including the
+           allowed rate and burst count.
    """

    def __init__(
-       self, store: DataStore, clock: Clock, rate_hz: float, burst_count: int
+       self,
+       store: DataStore,
+       clock: Clock,
+       cfg: RatelimitSettings,
    ):
        self.clock = clock
-       self.rate_hz = rate_hz
-       self.burst_count = burst_count
+       self.rate_hz = cfg.per_second
+       self.burst_count = cfg.burst_count
        self.store = store
+       self._limiter_name = cfg.key

        # An ordered dictionary representing the token buckets tracked by this rate
        # limiter. Each entry maps a key of arbitrary type to a tuple representing:

@@ -305,7 +310,8 @@ class Ratelimiter:

        if not allowed:
            raise LimitExceededError(
-               retry_after_ms=int(1000 * (time_allowed - time_now_s))
+               limiter_name=self._limiter_name,
+               retry_after_ms=int(1000 * (time_allowed - time_now_s)),
            )
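The docstring above describes a token bucket: tokens drain at `rate_hz` per second, and a request costing C tokens is allowed only while the bucket stays within `burst_count`. A self-contained sketch of that accounting (not Synapse's implementation, which additionally handles per-user overrides, async storage lookups, and bucket pruning):

    import time

    class TokenBucket:
        def __init__(self, rate_hz: float, burst_count: float):
            self.rate_hz = rate_hz          # long-term allowed actions per second
            self.burst_count = burst_count  # bucket capacity
            self.tokens = 0.0               # T: tokens currently in the bucket
            self.last = time.monotonic()

        def allow(self, cost: float = 1.0) -> bool:
            now = time.monotonic()
            # Tokens drain at rate_hz; an empty bucket means a full burst is available.
            self.tokens = max(0.0, self.tokens - (now - self.last) * self.rate_hz)
            self.last = now
            if self.tokens + cost <= self.burst_count:
                self.tokens += cost  # permitted: the request's cost enters the bucket
                return True
            return False             # denied: bucket unchanged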
@@ -322,7 +328,9 @@ class RequestRatelimiter:

        # The rate_hz and burst_count are overridden on a per-user basis
        self.request_ratelimiter = Ratelimiter(
-           store=self.store, clock=self.clock, rate_hz=0, burst_count=0
+           store=self.store,
+           clock=self.clock,
+           cfg=RatelimitSettings(key=rc_message.key, per_second=0, burst_count=0),
        )
        self._rc_message = rc_message

@@ -332,8 +340,7 @@ class RequestRatelimiter:
            self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
                store=self.store,
                clock=self.clock,
-               rate_hz=rc_admin_redaction.per_second,
-               burst_count=rc_admin_redaction.burst_count,
+               cfg=rc_admin_redaction,
            )
        else:
            self.admin_redaction_ratelimiter = None

@@ -91,6 +91,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.databases.main.stream import StreamWorkerStore
 from synapse.storage.databases.main.tags import TagsWorkerStore
+from synapse.storage.databases.main.task_scheduler import TaskSchedulerWorkerStore
 from synapse.storage.databases.main.transactions import TransactionWorkerStore
 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
 from synapse.storage.databases.main.user_directory import UserDirectoryStore

@@ -144,6 +145,7 @@ class GenericWorkerStore(
    TransactionWorkerStore,
    LockStore,
    SessionStore,
+   TaskSchedulerWorkerStore,
 ):
    # Properties that multiple storage classes define. Tell mypy what the
    # expected type is.
@@ -16,9 +16,6 @@ import logging
 import urllib.parse
 from typing import (
     TYPE_CHECKING,
     Any,
-    Awaitable,
-    Callable,
     Dict,
     Iterable,
     List,

@@ -27,10 +24,11 @@ from typing import (
     Sequence,
     Tuple,
     TypeVar,
+    Union,
 )

 from prometheus_client import Counter
-from typing_extensions import Concatenate, ParamSpec, TypeGuard
+from typing_extensions import ParamSpec, TypeGuard

 from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException, HttpResponseException

@@ -80,9 +78,7 @@ sent_todevice_counter = Counter(

 HOUR_IN_MS = 60 * 60 * 1000

-
 APP_SERVICE_PREFIX = "/_matrix/app/v1"
-APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"

 P = ParamSpec("P")
 R = TypeVar("R")

@@ -123,52 +119,12 @@ class ApplicationServiceApi(SimpleHttpClient):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
         self.clock = hs.get_clock()
+        self.config = hs.config.appservice

         self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
             hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
         )

-    async def _send_with_fallbacks(
-        self,
-        service: "ApplicationService",
-        prefixes: List[str],
-        path: str,
-        func: Callable[Concatenate[str, P], Awaitable[R]],
-        *args: P.args,
-        **kwargs: P.kwargs,
-    ) -> R:
-        """
-        Attempt to call an application service with multiple paths, falling back
-        until one succeeds.
-
-        Args:
-            service: The appliacation service, this provides the base URL.
-            prefixes: A last of paths to try in order for the requests.
-            path: A suffix to append to each prefix.
-            func: The function to call, the first argument will be the full
-                endpoint to fetch. Other arguments are provided by args/kwargs.
-
-        Returns:
-            The return value of func.
-        """
-        for i, prefix in enumerate(prefixes, start=1):
-            uri = f"{service.url}{prefix}{path}"
-            try:
-                return await func(uri, *args, **kwargs)
-            except HttpResponseException as e:
-                # If an error is received that is due to an unrecognised path,
-                # fallback to next path (if one exists). Otherwise, consider it
-                # a legitimate error and raise.
-                if i < len(prefixes) and is_unknown_endpoint(e):
-                    continue
-                raise
-            except Exception:
-                # Unexpected exceptions get sent to the caller.
-                raise
-
-        # The function should always exit via the return or raise above this.
-        raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")
-
     async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
         if service.url is None:
             return False
@@ -177,12 +133,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         assert service.hs_token is not None

         try:
-            response = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, ""],
-                f"/users/{urllib.parse.quote(user_id)}",
-                self.get_json,
-                {"access_token": service.hs_token},
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+            response = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
+                args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if response is not None:  # just an empty json object

@@ -203,12 +159,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         assert service.hs_token is not None

         try:
-            response = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, ""],
-                f"/rooms/{urllib.parse.quote(alias)}",
-                self.get_json,
-                {"access_token": service.hs_token},
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+            response = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
+                args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if response is not None:  # just an empty json object

@@ -241,15 +197,14 @@ class ApplicationServiceApi(SimpleHttpClient):
         assert service.hs_token is not None

         try:
-            args: Mapping[Any, Any] = {
-                **fields,
-                b"access_token": service.hs_token,
-            }
-            response = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
-                f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
-                self.get_json,
+            args: Mapping[bytes, Union[List[bytes], str]] = fields
+            if self.config.use_appservice_legacy_authorization:
+                args = {
+                    **fields,
+                    b"access_token": service.hs_token,
+                }
+            response = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
                 args=args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )

@@ -285,12 +240,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         # This is required by the configuration.
         assert service.hs_token is not None
         try:
-            info = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
-                f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
-                self.get_json,
-                {"access_token": service.hs_token},
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+            info = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
+                args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )

@@ -401,13 +356,14 @@ class ApplicationServiceApi(SimpleHttpClient):
         }

         try:
-            await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, ""],
-                f"/transactions/{urllib.parse.quote(str(txn_id))}",
-                self.put_json,
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+
+            await self.put_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
                 json_body=body,
-                args={"access_token": service.hs_token},
+                args=args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if logger.isEnabledFor(logging.DEBUG):
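Every call in the hunks above now authenticates with an `Authorization: Bearer` header, and the old `access_token` query parameter is only appended when `use_appservice_legacy_authorization` is set. The request shape, sketched with only the standard library (URL and token are placeholders):

    import urllib.parse
    import urllib.request

    def build_as_request(base_url: str, user_id: str, hs_token: str, legacy: bool = False):
        url = f"{base_url}/_matrix/app/v1/users/{urllib.parse.quote(user_id)}"
        if legacy:
            # Deprecated: a token in the query string leaks into access logs and proxies.
            url += "?" + urllib.parse.urlencode({"access_token": hs_token})
        req = urllib.request.Request(url)
        # The header is always sent, whichever mode is configured.
        req.add_header("Authorization", f"Bearer {hs_token}")
        return req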
@@ -186,9 +186,9 @@ class Config:
            TypeError, if given something other than an integer or a string
            ValueError: if given a string not of the form described above.
        """
-       if type(value) is int:
+       if type(value) is int:  # noqa: E721
            return value
-       elif type(value) is str:
+       elif isinstance(value, str):
            sizes = {"K": 1024, "M": 1024 * 1024}
            size = 1
            suffix = value[-1]

@@ -218,9 +218,9 @@ class Config:
            TypeError, if given something other than an integer or a string
            ValueError: if given a string not of the form described above.
        """
-       if type(value) is int:
+       if type(value) is int:  # noqa: E721
            return value
-       elif type(value) is str:
+       elif isinstance(value, str):
            second = 1000
            minute = 60 * second
            hour = 60 * minute

@@ -34,7 +34,7 @@ class AppServiceConfig(Config):
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        self.app_service_config_files = config.get("app_service_config_files", [])
        if not isinstance(self.app_service_config_files, list) or not all(
-           type(x) is str for x in self.app_service_config_files
+           isinstance(x, str) for x in self.app_service_config_files
        ):
            raise ConfigError(
                "Expected '%s' to be a list of AS config files:"

@@ -43,6 +43,14 @@ class AppServiceConfig(Config):
            )

        self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
+       self.use_appservice_legacy_authorization = config.get(
+           "use_appservice_legacy_authorization", False
+       )
+       if self.use_appservice_legacy_authorization:
+           logger.warning(
+               "The use of appservice legacy authorization via query params is deprecated"
+               " and should be considered insecure."
+           )


 def load_appservices(

@@ -18,7 +18,7 @@ from typing import Any, List
 from synapse.config.sso import SsoAttributeRequirement
 from synapse.types import JsonDict

-from ._base import Config
+from ._base import Config, ConfigError
 from ._util import validate_config

@@ -41,15 +41,30 @@ class CasConfig(Config):
            public_baseurl = self.root.server.public_baseurl
            self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"

+           self.cas_protocol_version = cas_config.get("protocol_version")
+           if (
+               self.cas_protocol_version is not None
+               and self.cas_protocol_version not in [1, 2, 3]
+           ):
+               raise ConfigError(
+                   "Unsupported CAS protocol version %s (only versions 1, 2, 3 are supported)"
+                   % (self.cas_protocol_version,),
+                   ("cas_config", "protocol_version"),
+               )
            self.cas_displayname_attribute = cas_config.get("displayname_attribute")
            required_attributes = cas_config.get("required_attributes") or {}
            self.cas_required_attributes = _parsed_required_attributes_def(
                required_attributes
            )

+           self.idp_name = cas_config.get("idp_name", "CAS")
+           self.idp_icon = cas_config.get("idp_icon")
+           self.idp_brand = cas_config.get("idp_brand")
+
        else:
            self.cas_server_url = None
            self.cas_service_url = None
+           self.cas_protocol_version = None
            self.cas_displayname_attribute = None
            self.cas_required_attributes = []
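The new `protocol_version` option is validated eagerly so that a bad value fails at startup rather than at the first login attempt. The same guard, reduced to plain Python outside Synapse's Config machinery (the exception type is illustrative):

    from typing import Optional

    def check_protocol_version(cas_config: dict) -> Optional[int]:
        version = cas_config.get("protocol_version")
        # None means "unset, use the default"; anything else must be 1, 2 or 3.
        if version is not None and version not in (1, 2, 3):
            raise ValueError(
                f"Unsupported CAS protocol version {version} "
                "(only versions 1, 2, 3 are supported)"
            )
        return version

    assert check_protocol_version({"protocol_version": 3}) == 3
    assert check_protocol_version({}) is None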
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Any, Optional
 import attr
 import attr.validators

+from synapse.api.errors import LimitExceededError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.config import ConfigError
 from synapse.config._base import Config, RootConfig

@@ -173,6 +174,13 @@ class MSC3861:
                ("enable_registration",),
            )

+       # We only need to test the user consent version, as if it must be set if the user_consent section was present in the config
+       if root.consent.user_consent_version is not None:
+           raise ConfigError(
+               "User consent cannot be enabled when OAuth delegation is enabled",
+               ("user_consent",),
+           )
+
        if (
            root.oidc.oidc_enabled
            or root.saml2.saml2_enabled

@@ -216,10 +224,10 @@ class MSC3861:
                ("session_lifetime",),
            )

-       if not root.experimental.msc3970_enabled:
+       if root.registration.enable_3pid_changes:
            raise ConfigError(
-               "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled",
-               ("experimental_features", "msc3970_enabled"),
+               "enable_3pid_changes cannot be enabled when OAuth delegation is enabled",
+               ("enable_3pid_changes",),
            )

@@ -247,6 +255,27 @@ class ExperimentalConfig(Config):
        # MSC3026 (busy presence state)
        self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)

+       # MSC2697 (device dehydration)
+       # Enabled by default since this option was added after adding the feature.
+       # It is not recommended that both MSC2697 and MSC3814 both be enabled at
+       # once.
+       self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True)
+
+       # MSC3814 (dehydrated devices with SSSS)
+       # This is an alternative method to achieve the same goals as MSC2697.
+       # It is not recommended that both MSC2697 and MSC3814 both be enabled at
+       # once.
+       self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)
+
+       if self.msc2697_enabled and self.msc3814_enabled:
+           raise ConfigError(
+               "MSC2697 and MSC3814 should not both be enabled.",
+               (
+                   "experimental_features",
+                   "msc3814_enabled",
+               ),
+           )
+
        # MSC3244 (room version capabilities)
        self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)

@@ -355,11 +384,6 @@ class ExperimentalConfig(Config):
        # MSC3391: Removing account data.
        self.msc3391_enabled = experimental.get("msc3391_enabled", False)

-       # MSC3959: Do not generate notifications for edits.
-       self.msc3958_supress_edit_notifs = experimental.get(
-           "msc3958_supress_edit_notifs", False
-       )
-
        # MSC3967: Do not require UIA when first uploading cross signing keys
        self.msc3967_enabled = experimental.get("msc3967_enabled", False)

@@ -376,9 +400,6 @@ class ExperimentalConfig(Config):
                "Invalid MSC3861 configuration", ("experimental", "msc3861")
            ) from exc

-       # MSC3970: Scope transaction IDs to devices
-       self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled)
-
        # Check that none of the other config options conflict with MSC3861 when enabled
        self.msc3861.check_config_conflicts(self.root)

@@ -386,3 +407,11 @@ class ExperimentalConfig(Config):
        self.msc4010_push_rules_account_data = experimental.get(
            "msc4010_push_rules_account_data", False
        )
+
+       # MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling
+       #
+       # This is a bit hacky, but the most reasonable way to *alway* include the
+       # headers.
+       LimitExceededError.include_retry_after_header = experimental.get(
+           "msc4041_enabled", False
+       )
@@ -65,5 +65,23 @@ class FederationConfig(Config):
        self.max_long_retries = federation_config.get("max_long_retries", 10)
        self.max_short_retries = federation_config.get("max_short_retries", 3)

+       # Allow for the configuration of the backoff algorithm used
+       # when trying to reach an unavailable destination.
+       # Unlike previous configuration those values applies across
+       # multiple requests and the state of the backoff is stored on DB.
+       self.destination_min_retry_interval_ms = Config.parse_duration(
+           federation_config.get("destination_min_retry_interval", "10m")
+       )
+       self.destination_retry_multiplier = federation_config.get(
+           "destination_retry_multiplier", 2
+       )
+       self.destination_max_retry_interval_ms = min(
+           Config.parse_duration(
+               federation_config.get("destination_max_retry_interval", "7d")
+           ),
+           # Set a hard-limit to not overflow the database column.
+           2**62,
+       )
+

 _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}
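These three settings drive an exponential backoff: each consecutive failure multiplies the retry interval by `destination_retry_multiplier`, starting at the minimum and clamped to the maximum. A sketch of the resulting schedule, with the defaults above (the helper name is illustrative):

    def next_retry_interval_ms(
        current_ms: int,
        min_ms: int = 10 * 60 * 1000,            # destination_min_retry_interval: "10m"
        multiplier: int = 2,                     # destination_retry_multiplier
        max_ms: int = 7 * 24 * 60 * 60 * 1000,   # destination_max_retry_interval: "7d"
    ) -> int:
        if current_ms <= 0:
            return min_ms                        # first failure: start at the minimum
        return min(current_ms * multiplier, max_ms)

    # 10m -> 20m -> 40m -> 80m -> ... capped at 7 days.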
@@ -280,6 +280,20 @@ def _parse_oidc_config_dict(
        for x in oidc_config.get("attribute_requirements", [])
    ]

+   # Read from either `client_secret_path` or `client_secret`. If both exist, error.
+   client_secret = oidc_config.get("client_secret")
+   client_secret_path = oidc_config.get("client_secret_path")
+   if client_secret_path is not None:
+       if client_secret is None:
+           client_secret = read_file(
+               client_secret_path, config_path + ("client_secret_path",)
+           ).rstrip("\n")
+       else:
+           raise ConfigError(
+               "Cannot specify both client_secret and client_secret_path",
+               config_path + ("client_secret",),
+           )
+
    return OidcProviderConfig(
        idp_id=idp_id,
        idp_name=oidc_config.get("idp_name", "OIDC"),

@@ -288,7 +302,7 @@ def _parse_oidc_config_dict(
        discover=oidc_config.get("discover", True),
        issuer=oidc_config["issuer"],
        client_id=oidc_config["client_id"],
-       client_secret=oidc_config.get("client_secret"),
+       client_secret=client_secret,
        client_secret_jwt_key=client_secret_jwt_key,
        client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
        pkce_method=oidc_config.get("pkce_method", "auto"),
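Reading the secret from a file keeps it out of the main config, and the `.rstrip("\n")` matters because most editors append a trailing newline that would otherwise become part of the secret. The file-reading step in isolation:

    from pathlib import Path

    def read_secret(path: str) -> str:
        # rstrip("\n") removes only trailing newlines, preserving any other characters.
        return Path(path).read_text().rstrip("\n")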
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, cast

 import attr

@@ -21,16 +21,47 @@ from synapse.types import JsonDict
 from ._base import Config


+@attr.s(slots=True, frozen=True, auto_attribs=True)
 class RatelimitSettings:
-    def __init__(
-        self,
-        config: Dict[str, float],
+    key: str
+    per_second: float
+    burst_count: int
+
+    @classmethod
+    def parse(
+        cls,
+        config: Dict[str, Any],
+        key: str,
         defaults: Optional[Dict[str, float]] = None,
-    ):
+    ) -> "RatelimitSettings":
+        """Parse config[key] as a new-style rate limiter config.
+
+        The key may refer to a nested dictionary using a full stop (.) to separate
+        each nested key. For example, use the key "a.b.c" to parse the following:
+
+        a:
+          b:
+            c:
+              per_second: 10
+              burst_count: 200
+
+        If this lookup fails, we'll fallback to the defaults.
+        """
         defaults = defaults or {"per_second": 0.17, "burst_count": 3.0}

-        self.per_second = config.get("per_second", defaults["per_second"])
-        self.burst_count = int(config.get("burst_count", defaults["burst_count"]))
+        rl_config = config
+        for part in key.split("."):
+            rl_config = rl_config.get(part, {})
+
+        # By this point we should have hit the rate limiter parameters.
+        # We don't actually check this though!
+        rl_config = cast(Dict[str, float], rl_config)
+
+        return cls(
+            key=key,
+            per_second=rl_config.get("per_second", defaults["per_second"]),
+            burst_count=int(rl_config.get("burst_count", defaults["burst_count"])),
+        )


 @attr.s(auto_attribs=True)
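The dotted-key lookup in `parse` walks one level of the config dict per `.`-separated part, falling back to the defaults whenever any level is missing. Assuming the attrs-based class from the hunk above, it would behave like this (the config values are made up):

    config = {
        "rc_login": {
            "address": {"per_second": 0.003, "burst_count": 5},
        }
    }

    settings = RatelimitSettings.parse(config, "rc_login.address")
    assert settings.key == "rc_login.address"
    assert settings.per_second == 0.003
    assert settings.burst_count == 5

    # A missing key falls back to the defaults (0.17 per second, burst of 3).
    missing = RatelimitSettings.parse(config, "rc_login.account")
    assert missing.per_second == 0.17

Carrying the key on the settings object is what later lets `Ratelimiter` report a meaningful `limiter_name` in `LimitExceededError.debug_context`.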
@@ -49,15 +80,14 @@ class RatelimitConfig(Config):
        # Load the new-style messages config if it exists. Otherwise fall back
        # to the old method.
        if "rc_message" in config:
-           self.rc_message = RatelimitSettings(
-               config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
+           self.rc_message = RatelimitSettings.parse(
+               config, "rc_message", defaults={"per_second": 0.2, "burst_count": 10.0}
            )
        else:
            self.rc_message = RatelimitSettings(
-               {
-                   "per_second": config.get("rc_messages_per_second", 0.2),
-                   "burst_count": config.get("rc_message_burst_count", 10.0),
-               }
+               key="rc_messages",
+               per_second=config.get("rc_messages_per_second", 0.2),
+               burst_count=config.get("rc_message_burst_count", 10.0),
            )

        # Load the new-style federation config, if it exists. Otherwise, fall

@@ -79,51 +109,59 @@ class RatelimitConfig(Config):
            }
        )

-       self.rc_registration = RatelimitSettings(config.get("rc_registration", {}))
+       self.rc_registration = RatelimitSettings.parse(config, "rc_registration", {})

-       self.rc_registration_token_validity = RatelimitSettings(
-           config.get("rc_registration_token_validity", {}),
+       self.rc_registration_token_validity = RatelimitSettings.parse(
+           config,
+           "rc_registration_token_validity",
            defaults={"per_second": 0.1, "burst_count": 5},
        )

        # It is reasonable to login with a bunch of devices at once (i.e. when
        # setting up an account), but it is *not* valid to continually be
        # logging into new devices.
-       rc_login_config = config.get("rc_login", {})
-       self.rc_login_address = RatelimitSettings(
-           rc_login_config.get("address", {}),
+       self.rc_login_address = RatelimitSettings.parse(
+           config,
+           "rc_login.address",
            defaults={"per_second": 0.003, "burst_count": 5},
        )
-       self.rc_login_account = RatelimitSettings(
-           rc_login_config.get("account", {}),
+       self.rc_login_account = RatelimitSettings.parse(
+           config,
+           "rc_login.account",
            defaults={"per_second": 0.003, "burst_count": 5},
        )
-       self.rc_login_failed_attempts = RatelimitSettings(
-           rc_login_config.get("failed_attempts", {})
+       self.rc_login_failed_attempts = RatelimitSettings.parse(
+           config,
+           "rc_login.failed_attempts",
+           {},
        )

        self.federation_rr_transactions_per_room_per_second = config.get(
            "federation_rr_transactions_per_room_per_second", 50
        )

-       rc_admin_redaction = config.get("rc_admin_redaction")
        self.rc_admin_redaction = None
-       if rc_admin_redaction:
-           self.rc_admin_redaction = RatelimitSettings(rc_admin_redaction)
+       if "rc_admin_redaction" in config:
+           self.rc_admin_redaction = RatelimitSettings.parse(
+               config, "rc_admin_redaction", {}
+           )

-       self.rc_joins_local = RatelimitSettings(
-           config.get("rc_joins", {}).get("local", {}),
+       self.rc_joins_local = RatelimitSettings.parse(
+           config,
+           "rc_joins.local",
            defaults={"per_second": 0.1, "burst_count": 10},
        )
-       self.rc_joins_remote = RatelimitSettings(
-           config.get("rc_joins", {}).get("remote", {}),
+       self.rc_joins_remote = RatelimitSettings.parse(
+           config,
+           "rc_joins.remote",
            defaults={"per_second": 0.01, "burst_count": 10},
        )

        # Track the rate of joins to a given room. If there are too many, temporarily
        # prevent local joins and remote joins via this server.
-       self.rc_joins_per_room = RatelimitSettings(
-           config.get("rc_joins_per_room", {}),
+       self.rc_joins_per_room = RatelimitSettings.parse(
+           config,
+           "rc_joins_per_room",
            defaults={"per_second": 1, "burst_count": 10},
        )

@@ -132,31 +170,37 @@ class RatelimitConfig(Config):
        # * For requests received over federation this is keyed by the origin.
        #
        # Note that this isn't exposed in the configuration as it is obscure.
-       self.rc_key_requests = RatelimitSettings(
-           config.get("rc_key_requests", {}),
+       self.rc_key_requests = RatelimitSettings.parse(
+           config,
+           "rc_key_requests",
            defaults={"per_second": 20, "burst_count": 100},
        )

-       self.rc_3pid_validation = RatelimitSettings(
-           config.get("rc_3pid_validation") or {},
+       self.rc_3pid_validation = RatelimitSettings.parse(
+           config,
+           "rc_3pid_validation",
            defaults={"per_second": 0.003, "burst_count": 5},
        )

-       self.rc_invites_per_room = RatelimitSettings(
-           config.get("rc_invites", {}).get("per_room", {}),
+       self.rc_invites_per_room = RatelimitSettings.parse(
+           config,
+           "rc_invites.per_room",
            defaults={"per_second": 0.3, "burst_count": 10},
        )
-       self.rc_invites_per_user = RatelimitSettings(
-           config.get("rc_invites", {}).get("per_user", {}),
+       self.rc_invites_per_user = RatelimitSettings.parse(
+           config,
+           "rc_invites.per_user",
            defaults={"per_second": 0.003, "burst_count": 5},
        )

-       self.rc_invites_per_issuer = RatelimitSettings(
-           config.get("rc_invites", {}).get("per_issuer", {}),
+       self.rc_invites_per_issuer = RatelimitSettings.parse(
+           config,
+           "rc_invites.per_issuer",
            defaults={"per_second": 0.3, "burst_count": 10},
        )

-       self.rc_third_party_invite = RatelimitSettings(
-           config.get("rc_third_party_invite", {}),
+       self.rc_third_party_invite = RatelimitSettings.parse(
+           config,
+           "rc_third_party_invite",
            defaults={"per_second": 0.0025, "burst_count": 5},
        )
@@ -133,7 +133,16 @@ class RegistrationConfig(Config):

        self.enable_set_displayname = config.get("enable_set_displayname", True)
        self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
-       self.enable_3pid_changes = config.get("enable_3pid_changes", True)
+
+       # The default value of enable_3pid_changes is True, unless msc3861 is enabled.
+       msc3861_enabled = (
+           (config.get("experimental_features") or {})
+           .get("msc3861", {})
+           .get("enabled", False)
+       )
+       self.enable_3pid_changes = config.get(
+           "enable_3pid_changes", not msc3861_enabled
+       )

        self.disable_msisdn_registration = config.get(
            "disable_msisdn_registration", False

@@ -89,8 +89,14 @@ class SAML2Config(Config):
            "grandfathered_mxid_source_attribute", "uid"
        )

        # refers to a SAML IdP entity ID
        self.saml2_idp_entityid = saml2_config.get("idp_entityid", None)

+       # IdP properties for Matrix clients
+       self.idp_name = saml2_config.get("idp_name", "SAML")
+       self.idp_icon = saml2_config.get("idp_icon")
+       self.idp_brand = saml2_config.get("idp_brand")
+
        # user_mapping_provider may be None if the key is present but has no value
        ump_dict = saml2_config.get("user_mapping_provider") or {}

@@ -35,3 +35,4 @@ class UserDirectoryConfig(Config):
        self.user_directory_search_prefer_local_users = user_directory_config.get(
            "prefer_local_users", False
        )
+       self.show_locked_users = user_directory_config.get("show_locked_users", False)
@@ -669,12 +669,18 @@ def _is_membership_change_allowed(
                errcode=Codes.INSUFFICIENT_POWER,
            )
    elif Membership.BAN == membership:
-       if user_level < ban_level or user_level <= target_level:
+       if user_level < ban_level:
            raise UnstableSpecAuthError(
                403,
                "You don't have permission to ban",
                errcode=Codes.INSUFFICIENT_POWER,
            )
+       elif user_level <= target_level:
+           raise UnstableSpecAuthError(
+               403,
+               "You don't have permission to ban this user",
+               errcode=Codes.INSUFFICIENT_POWER,
+           )
    elif room_version.knock_join_rule and Membership.KNOCK == membership:
        if join_rule != JoinRules.KNOCK and (
            not room_version.knock_restricted_join_rule

@@ -846,11 +852,11 @@ def _check_power_levels(
            "kick",
            "invite",
        }:
-           if type(v) is not int:
+           if type(v) is not int:  # noqa: E721
                raise SynapseError(400, f"{v!r} must be an integer.")
        if k in {"events", "notifications", "users"}:
            if not isinstance(v, collections.abc.Mapping) or not all(
-               type(v) is int for v in v.values()
+               type(v) is int for v in v.values()  # noqa: E721
            ):
                raise SynapseError(
                    400,

@@ -186,9 +186,6 @@ class EventContext(UnpersistedEventContextBase):
            ),
            "app_service_id": self.app_service.id if self.app_service else None,
            "partial_state": self.partial_state,
-           # add dummy delta_ids and prev_group for backwards compatibility
-           "delta_ids": None,
-           "prev_group": None,
        }

    @staticmethod

@@ -203,13 +200,6 @@ class EventContext(UnpersistedEventContextBase):
        Returns:
            The event context.
        """
-       # workaround for backwards/forwards compatibility: if the input doesn't have a value
-       # for "state_group_deltas" just assign an empty dict
-       state_group_deltas = input.get("state_group_deltas", None)
-       if state_group_deltas:
-           state_group_deltas = _decode_state_group_delta(state_group_deltas)
-       else:
-           state_group_deltas = {}

        context = EventContext(
            # We use the state_group and prev_state_id stuff to pull the

@@ -217,7 +207,7 @@ class EventContext(UnpersistedEventContextBase):
            storage=storage,
            state_group=input["state_group"],
            state_group_before_event=input["state_group_before_event"],
-           state_group_deltas=state_group_deltas,
+           state_group_deltas=_decode_state_group_delta(input["state_group_deltas"]),
            state_delta_due_to_event=_decode_state_dict(
                input["state_delta_due_to_event"]
            ),

@@ -136,11 +136,13 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
        ]

    elif event_type == EventTypes.Create:
-       # MSC2176 rules state that create events cannot be redacted.
        if room_version.updated_redaction_rules:
-           return event_dict
+           # MSC2176 rules state that create events cannot have their `content` redacted.
+           new_content = event_dict["content"]
+       elif not room_version.implicit_room_creator:
+           # Some room versions give meaning to `creator`
+           add_fields("creator")

-       add_fields("creator")
    elif event_type == EventTypes.JoinRules:
        add_fields("join_rule")
        if room_version.restricted_join_rule:
@@ -392,7 +394,6 @@ def serialize_event(
    time_now_ms: int,
    *,
    config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
-   msc3970_enabled: bool = False,
 ) -> JsonDict:
    """Serialize event for clients

@@ -400,8 +401,6 @@ def serialize_event(
        e
        time_now_ms
        config: Event serialization config
-       msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should
-           include the `transaction_id` in the event's `unsigned` section.

    Returns:
        The serialized event dictionary.

@@ -427,38 +426,46 @@ def serialize_event(
            e.unsigned["redacted_because"],
            time_now_ms,
            config=config,
-           msc3970_enabled=msc3970_enabled,
        )

    # If we have a txn_id saved in the internal_metadata, we should include it in the
    # unsigned section of the event if it was sent by the same session as the one
    # requesting the event.
    txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
-   if txn_id is not None and config.requester is not None:
-       # For the MSC3970 rules to be applied, we *need* to have the device ID in the
-       # event internal metadata. Since we were not recording them before, if it hasn't
-       # been recorded, we fallback to the old behaviour.
+   if (
+       txn_id is not None
+       and config.requester is not None
+       and config.requester.user.to_string() == e.sender
+   ):
+       # Some events do not have the device ID stored in the internal metadata,
+       # this includes old events as well as those created by appservice, guests,
+       # or with tokens minted with the admin API. For those events, fallback
+       # to using the access token instead.
        event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
-       if msc3970_enabled and event_device_id is not None:
+       if event_device_id is not None:
            if event_device_id == config.requester.device_id:
                d["unsigned"]["transaction_id"] = txn_id

        else:
-           # The pre-MSC3970 behaviour is to only include the transaction ID if the
-           # event was sent from the same access token. For regular users, we can use
-           # the access token ID to determine this. For guests, we can't, but since
-           # each guest only has one access token, we can just check that the event was
-           # sent by the same user as the one requesting the event.
+           # Fallback behaviour: only include the transaction ID if the event
+           # was sent from the same access token.
+           #
+           # For regular users, the access token ID can be used to determine this.
+           # This includes access tokens minted with the admin API.
+           #
+           # For guests and appservice users, we can't check the access token ID
+           # so assume it is the same session.
            event_token_id: Optional[int] = getattr(
                e.internal_metadata, "token_id", None
            )
-           if config.requester.user.to_string() == e.sender and (
+           if (
                (
                    event_token_id is not None
                    and config.requester.access_token_id is not None
                    and event_token_id == config.requester.access_token_id
                )
                or config.requester.is_guest
+               or config.requester.app_service
            ):
                d["unsigned"]["transaction_id"] = txn_id

@@ -473,14 +480,16 @@ def serialize_event(
    if config.as_client_event:
        d = config.event_format(d)

-   # If the event is a redaction, copy the redacts field from the content to
-   # top-level for backwards compatibility.
-   if (
-       e.type == EventTypes.Redaction
-       and e.room_version.updated_redaction_rules
-       and e.redacts is not None
-   ):
-       d["redacts"] = e.redacts
+   # If the event is a redaction, the field with the redacted event ID appears
+   # in a different location depending on the room version. e.redacts handles
+   # fetching from the proper location; copy it to the other location for forwards-
+   # and backwards-compatibility with clients.
+   if e.type == EventTypes.Redaction and e.redacts is not None:
+       if e.room_version.updated_redaction_rules:
+           d["redacts"] = e.redacts
+       else:
+           d["content"] = dict(d["content"])
+           d["content"]["redacts"] = e.redacts

    only_event_fields = config.only_event_fields
    if only_event_fields:
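Under the updated redaction rules the authoritative `redacts` field lives at the top level of the event, while older room versions keep it inside `content`; the serializer now mirrors whichever copy is authoritative into the other location, so every client sees both. The two wire shapes a client may receive, sketched as plain dicts (event ID is a placeholder):

    # Room version with updated redaction rules: top-level field is authoritative,
    # and the serializer copies it into content for older clients.
    updated = {
        "type": "m.room.redaction",
        "redacts": "$event_id",
        "content": {"redacts": "$event_id"},
    }

    # Older room version: content carries the authoritative copy, and the
    # serializer copies it to the top level for newer clients.
    legacy = {
        "type": "m.room.redaction",
        "redacts": "$event_id",
        "content": {"redacts": "$event_id"},
    }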
@@ -500,9 +509,6 @@ class EventClientSerializer:
    clients.
    """

-   def __init__(self, *, msc3970_enabled: bool = False):
-       self._msc3970_enabled = msc3970_enabled
-
    def serialize_event(
        self,
        event: Union[JsonDict, EventBase],

@@ -527,9 +533,7 @@ class EventClientSerializer:
        if not isinstance(event, EventBase):
            return event

-       serialized_event = serialize_event(
-           event, time_now, config=config, msc3970_enabled=self._msc3970_enabled
-       )
+       serialized_event = serialize_event(event, time_now, config=config)

        # Check if there are any bundled aggregations to include with the event.
        if bundle_aggregations:

@@ -698,7 +702,7 @@ def _copy_power_level_value_as_integer(
    :raises TypeError: if `old_value` is neither an integer nor a base-10 string
        representation of an integer.
    """
-   if type(old_value) is int:
+   if type(old_value) is int:  # noqa: E721
        power_levels[key] = old_value
        return

@@ -726,7 +730,7 @@ def validate_canonicaljson(value: Any) -> None:
    * Floats
    * NaN, Infinity, -Infinity
    """
-   if type(value) is int:
+   if type(value) is int:  # noqa: E721
        if value < CANONICALJSON_MIN_INT or CANONICALJSON_MAX_INT < value:
            raise SynapseError(400, "JSON integer out of range", Codes.BAD_JSON)

@@ -151,7 +151,7 @@ class EventValidator:
        max_lifetime = event.content.get("max_lifetime")

        if min_lifetime is not None:
-           if type(min_lifetime) is not int:
+           if type(min_lifetime) is not int:  # noqa: E721
                raise SynapseError(
                    code=400,
                    msg="'min_lifetime' must be an integer",

@@ -159,7 +159,7 @@ class EventValidator:
            )

        if max_lifetime is not None:
-           if type(max_lifetime) is not int:
+           if type(max_lifetime) is not int:  # noqa: E721
                raise SynapseError(
                    code=400,
                    msg="'max_lifetime' must be an integer",

@@ -280,7 +280,7 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
    _strip_unsigned_values(pdu_json)

    depth = pdu_json["depth"]
-   if type(depth) is not int:
+   if type(depth) is not int:  # noqa: E721
        raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON)

    if depth < 0:

@@ -1891,7 +1891,7 @@ class TimestampToEventResponse:
        )

        origin_server_ts = d.get("origin_server_ts")
-       if type(origin_server_ts) is not int:
+       if type(origin_server_ts) is not int:  # noqa: E721
            raise ValueError(
                "Invalid response: 'origin_server_ts' must be a int but received %r"
                % origin_server_ts
@@ -63,6 +63,7 @@ from synapse.federation.federation_base import (
 )
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
     make_deferred_yieldable,

@@ -137,6 +138,7 @@ class FederationServer(FederationBase):
        self._event_auth_handler = hs.get_event_auth_handler()
        self._room_member_handler = hs.get_room_member_handler()
        self._e2e_keys_handler = hs.get_e2e_keys_handler()
+       self._worker_lock_handler = hs.get_worker_locks_handler()

        self._state_storage_controller = hs.get_storage_controllers().state

@@ -1236,9 +1238,18 @@ class FederationServer(FederationBase):
        logger.info("handling received PDU in room %s: %s", room_id, event)
        try:
            with nested_logging_context(event.event_id):
-               await self._federation_event_handler.on_receive_pdu(
-                   origin, event
-               )
+               # We're taking out a lock within a lock, which could
+               # lead to deadlocks if we're not careful. However, it is
+               # safe on this occasion as we only ever take a write
+               # lock when deleting a room, which we would never do
+               # while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME`
+               # lock.
+               async with self._worker_lock_handler.acquire_read_write_lock(
+                   NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
+               ):
+                   await self._federation_event_handler.on_receive_pdu(
+                       origin, event
+                   )
        except FederationError as e:
            # XXX: Ideally we'd inform the remote we failed to process
            # the event, but we can't return an error in the transaction
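Inbound PDU handling now takes the purge lock in read mode, so many events can be processed concurrently while a room deletion, the only writer, excludes them all; the comment in the hunk explains why nesting it under the inbound-handling lock cannot deadlock. The general shape of such a lock, sketched with asyncio (this is a generic illustration, not Synapse's worker-lock implementation, which is backed by the database):

    import asyncio
    from contextlib import asynccontextmanager

    class ReadWriteLock:
        """Many concurrent readers, or one exclusive writer."""

        def __init__(self) -> None:
            self._readers = 0
            self._no_readers = asyncio.Event()
            self._no_readers.set()
            self._writer = asyncio.Lock()

        @asynccontextmanager
        async def read(self):
            # Register as a reader; queues briefly behind any active writer.
            async with self._writer:
                self._readers += 1
                self._no_readers.clear()
            try:
                yield
            finally:
                self._readers -= 1
                if self._readers == 0:
                    self._no_readers.set()

        @asynccontextmanager
        async def write(self):
            # Exclude other writers and new readers, then drain in-flight readers.
            async with self._writer:
                await self._no_readers.wait()
                yield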
@@ -49,7 +49,7 @@ from synapse.api.presence import UserPresenceState
 from synapse.federation.sender import AbstractFederationSender, FederationSender
 from synapse.metrics import LaterGauge
 from synapse.replication.tcp.streams.federation import FederationStream
-from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
+from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
 from synapse.util.metrics import Measure

 from .units import Edu

@@ -229,7 +229,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
        """
        # nothing to do here: the replication listener will handle it.

-   def send_presence_to_destinations(
+   async def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """As per FederationSender

@@ -245,7 +245,9 @@ class FederationRemoteSendQueue(AbstractFederationSender):

        self.notifier.on_new_replication_data()

-   def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+   async def send_device_messages(
+       self, destinations: StrCollection, immediate: bool = True
+   ) -> None:
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.

@@ -463,7 +465,7 @@ class ParsedFederationStreamData:
    edus: Dict[str, List[Edu]]


-def process_rows_for_federation(
+async def process_rows_for_federation(
    transaction_queue: FederationSender,
    rows: List[FederationStream.FederationStreamRow],
 ) -> None:

@@ -496,7 +498,7 @@ def process_rows_for_federation(
        parsed_row.add_to_buffer(buff)

    for state, destinations in buff.presence_destinations:
-       transaction_queue.send_presence_to_destinations(
+       await transaction_queue.send_presence_to_destinations(
            states=[state], destinations=destinations
        )

@@ -147,7 +147,10 @@ from twisted.internet import defer
 import synapse.metrics
 from synapse.api.presence import UserPresenceState
 from synapse.events import EventBase
-from synapse.federation.sender.per_destination_queue import PerDestinationQueue
+from synapse.federation.sender.per_destination_queue import (
+    CATCHUP_RETRY_INTERVAL,
+    PerDestinationQueue,
+)
 from synapse.federation.sender.transaction_manager import TransactionManager
 from synapse.federation.units import Edu
 from synapse.logging.context import make_deferred_yieldable, run_in_background

@@ -161,9 +164,10 @@ from synapse.metrics.background_process_metrics import (
     run_as_background_process,
     wrap_as_background_process,
 )
-from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
+from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
 from synapse.util import Clock
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import filter_destinations_by_retry_limiter

 if TYPE_CHECKING:
     from synapse.events.presence_router import PresenceRouter

@@ -213,7 +217,7 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
-   def send_presence_to_destinations(
+   async def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """Send the given presence states to the given destinations.

@@ -242,9 +246,11 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
-   def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+   async def send_device_messages(
+       self, destinations: StrCollection, immediate: bool = True
+   ) -> None:
        """Tells the sender that a new device message is ready to be sent to the
-       destination. The `immediate` flag specifies whether the messages should
+       destinations. The `immediate` flag specifies whether the messages should
        be tried to be sent immediately, or whether it can be delayed for a
        short while (to aid performance).
        """
@@ -716,6 +722,13 @@ class FederationSender(AbstractFederationSender):
            pdu.internal_metadata.stream_ordering,
        )

+       destinations = await filter_destinations_by_retry_limiter(
+           destinations,
+           clock=self.clock,
+           store=self.store,
+           retry_due_within_ms=CATCHUP_RETRY_INTERVAL,
+       )
+
        for destination in destinations:
            self._get_per_destination_queue(destination).send_pdu(pdu)

@@ -763,12 +776,20 @@ class FederationSender(AbstractFederationSender):
        domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
            room_id
        )
-       domains = [
+       domains: StrCollection = [
            d
            for d in domains_set
            if not self.is_mine_server_name(d)
            and self._federation_shard_config.should_handle(self._instance_name, d)
        ]
+
+       domains = await filter_destinations_by_retry_limiter(
+           domains,
+           clock=self.clock,
+           store=self.store,
+           retry_due_within_ms=CATCHUP_RETRY_INTERVAL,
+       )
+
        if not domains:
            return

@@ -816,7 +837,7 @@ class FederationSender(AbstractFederationSender):
        for queue in queues:
            queue.flush_read_receipts_for_room(room_id)

-   def send_presence_to_destinations(
+   async def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """Send the given presence states to the given destinations.

@@ -831,13 +852,20 @@ class FederationSender(AbstractFederationSender):
        for state in states:
            assert self.is_mine_id(state.user_id)

+       destinations = await filter_destinations_by_retry_limiter(
+           [
+               d
+               for d in destinations
+               if self._federation_shard_config.should_handle(self._instance_name, d)
+           ],
+           clock=self.clock,
+           store=self.store,
+           retry_due_within_ms=CATCHUP_RETRY_INTERVAL,
+       )
+
        for destination in destinations:
            if self.is_mine_server_name(destination):
                continue
-           if not self._federation_shard_config.should_handle(
-               self._instance_name, destination
-           ):
-               continue

            self._get_per_destination_queue(destination).send_presence(
                states, start_loop=False

@@ -896,21 +924,29 @@ class FederationSender(AbstractFederationSender):
        else:
            queue.send_edu(edu)

-   def send_device_messages(self, destination: str, immediate: bool = True) -> None:
-       if self.is_mine_server_name(destination):
-           logger.warning("Not sending device update to ourselves")
-           return
+   async def send_device_messages(
+       self, destinations: StrCollection, immediate: bool = True
+   ) -> None:
+       destinations = await filter_destinations_by_retry_limiter(
+           [
+               destination
+               for destination in destinations
+               if self._federation_shard_config.should_handle(
+                   self._instance_name, destination
+               )
+               and not self.is_mine_server_name(destination)
+           ],
+           clock=self.clock,
+           store=self.store,
+           retry_due_within_ms=CATCHUP_RETRY_INTERVAL,
+       )

-       if not self._federation_shard_config.should_handle(
-           self._instance_name, destination
-       ):
-           return
-
-       if immediate:
-           self._get_per_destination_queue(destination).attempt_new_transaction()
-       else:
-           self._get_per_destination_queue(destination).mark_new_data()
-           self._destination_wakeup_queue.add_to_queue(destination)
+       for destination in destinations:
+           if immediate:
+               self._get_per_destination_queue(destination).attempt_new_transaction()
+           else:
+               self._get_per_destination_queue(destination).mark_new_data()
+               self._destination_wakeup_queue.add_to_queue(destination)

    def wake_destination(self, destination: str) -> None:
        """Called when we want to retry sending transactions to a remote.
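Each send path above now prunes destinations whose backoff puts their next retry more than `CATCHUP_RETRY_INTERVAL` away before fanning out, instead of discovering that inside every per-destination queue. A sketch of that pre-filtering step (the retry-metadata shape here is illustrative; Synapse reads it from the database):

    from typing import Dict, List

    CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000  # 1 hour, as defined in the hunk below

    def filter_destinations(
        destinations: List[str],
        next_retry_ms: Dict[str, int],  # destination -> next retry due, ms since epoch
        now_ms: int,
        retry_due_within_ms: int = CATCHUP_RETRY_INTERVAL,
    ) -> List[str]:
        """Keep destinations that are retryable now or due within the window."""
        return [
            d
            for d in destinations
            if next_retry_ms.get(d, 0) <= now_ms + retry_due_within_ms
        ]

Destinations dropped here are not lost: once they become reachable again, the catch-up machinery replays what they missed.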
@@ -59,6 +59,10 @@ sent_edus_by_type = Counter(
|
||||
)
|
||||
|
||||
|
||||
# If the retry interval is larger than this then we enter "catchup" mode
|
||||
CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000
|
||||
|
||||
|
||||
class PerDestinationQueue:
|
||||
"""
|
||||
Manages the per-destination transmission queues.
|
||||
@@ -370,7 +374,7 @@ class PerDestinationQueue:
|
||||
),
|
||||
)
|
||||
|
||||
if e.retry_interval > 60 * 60 * 1000:
|
||||
if e.retry_interval > CATCHUP_RETRY_INTERVAL:
|
||||
# we won't retry for another hour!
|
||||
# (this suggests a significant outage)
|
||||
# We drop pending EDUs because otherwise they will
|
||||
|
||||
@@ -249,8 +249,10 @@ class TransportLayerClient:
|
||||
data=json_data,
|
||||
json_data_callback=json_data_callback,
|
||||
long_retries=True,
|
||||
backoff_on_404=True, # If we get a 404 the other side has gone
|
||||
try_trailing_slash_on_400=True,
|
||||
# Sending a transaction should always succeed, if it doesn't
|
||||
# then something is wrong and we should backoff.
|
||||
backoff_on_all_error_codes=True,
|
||||
)
|
||||
|
||||
async def make_query(
|
||||
@@ -475,13 +477,11 @@ class TransportLayerClient:
         See synapse.federation.federation_client.FederationClient.get_public_rooms for
         more information.
         """
+        path = _create_v1_path("/publicRooms")
+
         if search_filter:
             # this uses MSC2197 (Search Filtering over Federation)
-            path = _create_v1_path("/publicRooms")
-
-            data: Dict[str, Any] = {
-                "include_all_networks": "true" if include_all_networks else "false"
-            }
+            data: Dict[str, Any] = {"include_all_networks": include_all_networks}
             if third_party_instance_id:
                 data["third_party_instance_id"] = third_party_instance_id
             if limit:
@@ -505,17 +505,15 @@ class TransportLayerClient:
             )
             raise
         else:
-            path = _create_v1_path("/publicRooms")
-
             args: Dict[str, Union[str, Iterable[str]]] = {
                 "include_all_networks": "true" if include_all_networks else "false"
             }
             if third_party_instance_id:
-                args["third_party_instance_id"] = (third_party_instance_id,)
+                args["third_party_instance_id"] = third_party_instance_id
             if limit:
-                args["limit"] = [str(limit)]
+                args["limit"] = str(limit)
             if since_token:
-                args["since"] = [since_token]
+                args["since"] = since_token
 
             try:
                 response = await self.client.get_json(

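In the non-search branch the query arguments were previously wrapped in lists and tuples, and `(third_party_instance_id,)` in particular was a stray one-element tuple; plain strings serialise unambiguously. The stdlib illustrates the hazard (Synapse's own client-side encoding may differ in detail):

from urllib.parse import urlencode

# A tuple value is stringified wholesale unless the encoder is told otherwise:
print(urlencode({"third_party_instance_id": ("net_id",)}))
# third_party_instance_id=%28%27net_id%27%2C%29  <- the repr of the tuple!

# Plain string values encode unambiguously:
print(urlencode({"third_party_instance_id": "net_id", "limit": str(10)}))
# third_party_instance_id=net_id&limit=10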
@@ -67,6 +67,7 @@ class AdminHandler:
             "name",
             "admin",
             "deactivated",
+            "locked",
             "shadow_banned",
             "creation_ts",
             "appservice_id",
@@ -75,6 +76,7 @@ class AdminHandler:
             "consent_ts",
             "user_type",
             "is_guest",
+            "last_seen_ts",
         }
 
         if self._msc3866_enabled:

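These two hunks add `locked` and `last_seen_ts` to the set of columns the admin handler exposes about a user. An illustrative entry as it might appear in an admin users response (field values invented for this example, not captured from a real server):

# Illustrative admin-API user entry (values invented):
user_entry = {
    "name": "@alice:example.com",
    "admin": False,
    "deactivated": False,
    "locked": False,            # newly exposed
    "shadow_banned": False,
    "creation_ts": 1693000000,  # seconds
    "user_type": None,
    "is_guest": False,
    "last_seen_ts": 1693500000000,  # newly exposed; milliseconds
}
print(user_entry["locked"], user_entry["last_seen_ts"])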
@@ -218,19 +218,17 @@ class AuthHandler:
         self._failed_uia_attempts_ratelimiter = Ratelimiter(
             store=self.store,
             clock=self.clock,
-            rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second,
-            burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count,
+            cfg=self.hs.config.ratelimiting.rc_login_failed_attempts,
         )
 
         # The number of seconds to keep a UI auth session active.
         self._ui_auth_session_timeout = hs.config.auth.ui_auth_session_timeout
 
-        # Ratelimitier for failed /login attempts
+        # Ratelimiter for failed /login attempts
         self._failed_login_attempts_ratelimiter = Ratelimiter(
             store=self.store,
             clock=hs.get_clock(),
-            rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second,
-            burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count,
+            cfg=self.hs.config.ratelimiting.rc_login_failed_attempts,
         )
 
         self._clock = self.hs.get_clock()

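Passing `cfg=` hands the `Ratelimiter` the whole rate-limit config object instead of unpacking `rate_hz`/`burst_count` at every call site, so the two knobs can no longer be mixed from different config sections. A toy version of the refactored shape, assuming a config object carrying those two fields (names illustrative, not Synapse's exact class):

from dataclasses import dataclass

@dataclass
class RatelimitSettings:
    """Toy config object (Synapse's real class carries more than this)."""
    per_second: float
    burst_count: int

class Ratelimiter:
    def __init__(self, cfg: RatelimitSettings) -> None:
        # The limiter reads both knobs off one config object, so a call site
        # can no longer pair per_second from one section with burst_count
        # from another.
        self.rate_hz = cfg.per_second
        self.burst_count = cfg.burst_count

limiter = Ratelimiter(cfg=RatelimitSettings(per_second=0.17, burst_count=3))
print(limiter.rate_hz, limiter.burst_count)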
@@ -67,6 +67,7 @@ class CasHandler:
 
         self._cas_server_url = hs.config.cas.cas_server_url
         self._cas_service_url = hs.config.cas.cas_service_url
+        self._cas_protocol_version = hs.config.cas.cas_protocol_version
         self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute
         self._cas_required_attributes = hs.config.cas.cas_required_attributes
 
@@ -76,12 +77,13 @@ class CasHandler:
         self.idp_id = "cas"
 
         # user-facing name of this auth provider
-        self.idp_name = "CAS"
+        self.idp_name = hs.config.cas.idp_name
 
-        # we do not currently support brands/icons for CAS auth, but this is required by
-        # the SsoIdentityProvider protocol type.
-        self.idp_icon = None
-        self.idp_brand = None
+        # MXC URI for icon for this auth provider
+        self.idp_icon = hs.config.cas.idp_icon
+
+        # optional brand identifier for this auth provider
+        self.idp_brand = hs.config.cas.idp_brand
 
         self._sso_handler = hs.get_sso_handler()
 
@@ -120,7 +122,10 @@ class CasHandler:
         Returns:
             The parsed CAS response.
         """
-        uri = self._cas_server_url + "/proxyValidate"
+        if self._cas_protocol_version == 3:
+            uri = self._cas_server_url + "/p3/proxyValidate"
+        else:
+            uri = self._cas_server_url + "/proxyValidate"
         args = {
             "ticket": ticket,
             "service": self._build_service_param(service_args),

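CAS protocol 3.0 serves attribute-returning ticket validation under `/p3/`, while older servers only expose `/proxyValidate`; the handler now chooses the path from the configured protocol version. The same selection in isolation:

def proxy_validate_uri(cas_server_url: str, protocol_version: int) -> str:
    """Pick the CAS ticket-validation endpoint for the configured version."""
    if protocol_version == 3:
        # CAS 3.0 serves attribute-returning validation under /p3/.
        return cas_server_url + "/p3/proxyValidate"
    return cas_server_url + "/proxyValidate"

print(proxy_validate_uri("https://cas.example.com", 3))  # .../p3/proxyValidate
print(proxy_validate_uri("https://cas.example.com", 2))  # .../proxyValidate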
@@ -385,6 +385,7 @@ class DeviceHandler(DeviceWorkerHandler):
         self.federation_sender = hs.get_federation_sender()
         self._account_data_handler = hs.get_account_data_handler()
         self._storage_controllers = hs.get_storage_controllers()
+        self.db_pool = hs.get_datastores().main.db_pool
 
         self.device_list_updater = DeviceListUpdater(hs, self)
 
@@ -653,29 +654,38 @@ class DeviceHandler(DeviceWorkerHandler):
     async def store_dehydrated_device(
         self,
         user_id: str,
         device_id: Optional[str],
         device_data: JsonDict,
         initial_device_display_name: Optional[str] = None,
+        keys_for_device: Optional[JsonDict] = None,
     ) -> str:
-        """Store a dehydrated device for a user. If the user had a previous
-        dehydrated device, it is removed.
+        """Store a dehydrated device for a user, optionally storing the keys associated with
+        it as well. If the user had a previous dehydrated device, it is removed.
 
         Args:
             user_id: the user that we are storing the device for
             device_id: device id supplied by client
             device_data: the dehydrated device information
             initial_device_display_name: The display name to use for the device
+            keys_for_device: keys for the dehydrated device
         Returns:
             device id of the dehydrated device
         """
         device_id = await self.check_device_registered(
             user_id,
             None,
             device_id,
             initial_device_display_name,
         )
+
+        time_now = self.clock.time_msec()
+
         old_device_id = await self.store.store_dehydrated_device(
-            user_id, device_id, device_data
+            user_id, device_id, device_data, time_now, keys_for_device
         )
+
         if old_device_id is not None:
             await self.delete_devices(user_id, [old_device_id])
 
         return device_id
 
     async def rehydrate_device(
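The handler now registers the device, stamps the current time, stores the dehydrated payload together with any supplied device keys in a single call, and finally deletes the displaced dehydrated device. A condensed sketch of that ordering with a toy in-memory store (payload values are invented placeholders, not Synapse's real schema):

import asyncio
import time
from typing import Dict, Optional, Tuple

class ToyStore:
    """Toy in-memory stand-in for the datastore: one dehydrated device per user."""
    def __init__(self) -> None:
        self._dehydrated: Dict[str, Tuple[str, dict]] = {}

    async def store_dehydrated_device(
        self,
        user_id: str,
        device_id: str,
        device_data: dict,
        time_now: int,
        keys: Optional[dict],
    ) -> Optional[str]:
        # Returns the id of any displaced device so the caller can delete it.
        old = self._dehydrated.get(user_id)
        self._dehydrated[user_id] = (device_id, {"data": device_data, "keys": keys, "ts": time_now})
        return old[0] if old else None

async def main() -> None:
    store = ToyStore()
    now = int(time.time() * 1000)
    first = await store.store_dehydrated_device("@u:hs", "DEV1", {"algorithm": "toy.v1"}, now, None)
    second = await store.store_dehydrated_device("@u:hs", "DEV2", {"algorithm": "toy.v1"}, now, {"device_keys": {}})
    print(first, second)  # None DEV1 -- the handler must now delete DEV1

asyncio.run(main())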
@@ -720,6 +730,22 @@ class DeviceHandler(DeviceWorkerHandler):
 
         return {"success": True}
 
+    async def delete_dehydrated_device(self, user_id: str, device_id: str) -> None:
+        """
+        Delete a stored dehydrated device.
+
+        Args:
+            user_id: the user_id to delete the device from
+            device_id: id of the dehydrated device to delete
+        """
+        success = await self.store.remove_dehydrated_device(user_id, device_id)
+
+        if not success:
+            raise errors.NotFoundError()
+
+        await self.delete_devices(user_id, [device_id])
+        await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
+
     @wrap_as_background_process("_handle_new_device_update_async")
     async def _handle_new_device_update_async(self) -> None:
         """Called when we have a new local device list update that we need to
@@ -810,17 +836,16 @@ class DeviceHandler(DeviceWorkerHandler):
                     user_id,
                     hosts,
                 )
-                for host in hosts:
-                    self.federation_sender.send_device_messages(
-                        host, immediate=False
-                    )
-                    # TODO: when called, this isn't in a logging context.
-                    # This leads to log spam, sentry event spam, and massive
-                    # memory usage.
-                    # See https://github.com/matrix-org/synapse/issues/12552.
-                    # log_kv(
-                    #     {"message": "sent device update to host", "host": host}
-                    # )
+                await self.federation_sender.send_device_messages(
+                    hosts, immediate=False
+                )
+                # TODO: when called, this isn't in a logging context.
+                # This leads to log spam, sentry event spam, and massive
+                # memory usage.
+                # See https://github.com/matrix-org/synapse/issues/12552.
+                # log_kv(
+                #     {"message": "sent device update to host", "host": host}
+                # )
 
                 if current_stream_id != stream_id:
                     # Clear the set of hosts we've already sent to as we're
@@ -925,8 +950,9 @@ class DeviceHandler(DeviceWorkerHandler):
 
         # Notify things that device lists need to be sent out.
         self.notifier.notify_replication()
-        for host in potentially_changed_hosts:
-            self.federation_sender.send_device_messages(host, immediate=False)
+        await self.federation_sender.send_device_messages(
+            potentially_changed_hosts, immediate=False
+        )
 
 
     def _update_device_from_client_ips(
@@ -1124,7 +1150,14 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
         )
 
         if resync:
-            await self.multi_user_device_resync([user_id])
+            # We mark as stale up front in case we get restarted.
+            await self.store.mark_remote_users_device_caches_as_stale([user_id])
+            run_as_background_process(
+                "_maybe_retry_device_resync",
+                self.multi_user_device_resync,
+                [user_id],
+                False,
+            )
         else:
             # Simply update the single device, since we know that is the only
             # change (because of the single prev_id matching the current cache)

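Instead of awaiting a potentially slow `multi_user_device_resync` inline, the updater now persists a stale-cache marker first and fires the resync off as a background process; if the process restarts between the two steps, the marker is still there to retry from. The shape of that pattern with a toy scheduler standing in for `run_as_background_process`:

import asyncio
from typing import Callable, Coroutine, List, Set

stale_users: Set[str] = set()

async def mark_stale(user_ids: List[str]) -> None:
    # Persisted first: if we crash before the resync runs, a later pass
    # can still find these users and retry.
    stale_users.update(user_ids)

async def multi_user_device_resync(user_ids: List[str], mark_failed_as_stale: bool) -> None:
    print(f"resyncing {user_ids} (mark_failed_as_stale={mark_failed_as_stale})")
    stale_users.difference_update(user_ids)

def run_as_background_process(name: str, func: Callable[..., Coroutine], *args) -> "asyncio.Task":
    # Toy version: schedule and return immediately; never block the caller.
    return asyncio.create_task(func(*args))

async def handle_suspected_stale_cache(user_id: str) -> None:
    await mark_stale([user_id])  # the durable marker goes in up front
    run_as_background_process(
        "_maybe_retry_device_resync", multi_user_device_resync, [user_id], False
    )

async def main() -> None:
    await handle_suspected_stale_cache("@remote:other.example")
    await asyncio.sleep(0)  # give the background task a chance to run

asyncio.run(main())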
@@ -13,10 +13,11 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, Any, Dict
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
-from synapse.api.errors import SynapseError
+from synapse.api.errors import Codes, SynapseError
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.logging.context import run_in_background
 from synapse.logging.opentracing import (
@@ -48,6 +49,9 @@ class DeviceMessageHandler:
         self.store = hs.get_datastores().main
         self.notifier = hs.get_notifier()
         self.is_mine = hs.is_mine
+        if hs.config.experimental.msc3814_enabled:
+            self.event_sources = hs.get_event_sources()
+            self.device_handler = hs.get_device_handler()
 
         # We only need to poke the federation sender explicitly if its on the
         # same instance. Other federation sender instances will get notified by
@@ -86,8 +90,7 @@ class DeviceMessageHandler:
             self._ratelimiter = Ratelimiter(
                 store=self.store,
                 clock=hs.get_clock(),
-                rate_hz=hs.config.ratelimiting.rc_key_requests.per_second,
-                burst_count=hs.config.ratelimiting.rc_key_requests.burst_count,
+                cfg=hs.config.ratelimiting.rc_key_requests,
             )
 
     async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None:
@@ -299,7 +302,93 @@ class DeviceMessageHandler:
             )
 
         if self.federation_sender:
-            for destination in remote_messages.keys():
-                # Enqueue a new federation transaction to send the new
-                # device messages to each remote destination.
-                self.federation_sender.send_device_messages(destination)
+            # Enqueue a new federation transaction to send the new
+            # device messages to each remote destination.
+            await self.federation_sender.send_device_messages(remote_messages.keys())
+
+    async def get_events_for_dehydrated_device(
+        self,
+        requester: Requester,
+        device_id: str,
+        since_token: Optional[str],
+        limit: int,
+    ) -> JsonDict:
+        """Fetches up to `limit` events sent to `device_id` starting from `since_token`
+        and returns the new since token. If there are no more messages, returns an empty
+        array.
+
+        Args:
+            requester: the user requesting the messages
+            device_id: ID of the dehydrated device
+            since_token: stream id to start from when fetching messages
+            limit: the number of messages to fetch
+        Returns:
+            A dict containing the to-device messages, as well as a token that the client
+            can provide in the next call to fetch the next batch of messages
+        """
+
+        user_id = requester.user.to_string()
+
+        # only allow fetching messages for the dehydrated device id currently associated
+        # with the user
+        dehydrated_device = await self.device_handler.get_dehydrated_device(user_id)
+        if dehydrated_device is None:
+            raise SynapseError(
+                HTTPStatus.FORBIDDEN,
+                "No dehydrated device exists",
+                Codes.FORBIDDEN,
+            )
+
+        dehydrated_device_id, _ = dehydrated_device
+        if device_id != dehydrated_device_id:
+            raise SynapseError(
+                HTTPStatus.FORBIDDEN,
+                "You may only fetch messages for your dehydrated device",
+                Codes.FORBIDDEN,
+            )
+
+        since_stream_id = 0
+        if since_token:
+            if not since_token.startswith("d"):
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "from parameter %r has an invalid format" % (since_token,),
+                    errcode=Codes.INVALID_PARAM,
+                )
+
+            try:
+                since_stream_id = int(since_token[1:])
+            except Exception:
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "from parameter %r has an invalid format" % (since_token,),
+                    errcode=Codes.INVALID_PARAM,
+                )
+
+        to_token = self.event_sources.get_current_token().to_device_key
+
+        messages, stream_id = await self.store.get_messages_for_device(
+            user_id, device_id, since_stream_id, to_token, limit
+        )
+
+        for message in messages:
+            # Remove the message id before sending to client
+            message_id = message.pop("message_id", None)
+            if message_id:
+                set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
+
+        logger.debug(
+            "Returning %d to-device messages between %d and %d (current token: %d) for "
+            "dehydrated device %s, user_id %s",
+            len(messages),
+            since_stream_id,
+            stream_id,
+            to_token,
+            device_id,
+            user_id,
+        )
+
+        return {
+            "events": messages,
+            "next_batch": f"d{stream_id}",
+        }

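The `d`-prefixed tokens give dehydrated-device pagination its own opaque-token namespace: parsing is a prefix check plus an integer conversion, and the handler mints the next token as `f"d{stream_id}"`. Mirrored in isolation:

from typing import Optional

def parse_dehydrated_since_token(since_token: Optional[str]) -> int:
    """Parse a 'd<stream_id>' token; 0 means 'from the beginning'."""
    if not since_token:
        return 0
    if not since_token.startswith("d"):
        raise ValueError("from parameter %r has an invalid format" % (since_token,))
    try:
        return int(since_token[1:])
    except ValueError:
        raise ValueError("from parameter %r has an invalid format" % (since_token,))

print(parse_dehydrated_since_token(None))      # 0
print(parse_dehydrated_since_token("d1234"))   # 1234
stream_id = 5678
print(f"d{stream_id}")                         # the next_batch token handed back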
@@ -67,6 +67,7 @@ class EventStreamHandler:
 
         context = await presence_handler.user_syncing(
             requester.user.to_string(),
+            requester.device_id,
             affect_presence=affect_presence,
             presence_state=PresenceState.ONLINE,
         )

@@ -60,6 +60,7 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
 from synapse.events.validator import EventValidator
 from synapse.federation.federation_client import InvalidResponseError
+from synapse.handlers.pagination import PURGE_PAGINATION_LOCK_NAME
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import nested_logging_context
 from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
@@ -152,6 +153,7 @@ class FederationHandler:
         self._device_handler = hs.get_device_handler()
         self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
         self._notifier = hs.get_notifier()
+        self._worker_locks = hs.get_worker_locks_handler()
 
         self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
             hs
@@ -200,7 +202,7 @@ class FederationHandler:
     @trace
     @tag_args
     async def maybe_backfill(
-        self, room_id: str, current_depth: int, limit: int
+        self, room_id: str, current_depth: int, limit: int, record_time: bool = True
     ) -> bool:
         """Checks the database to see if we should backfill before paginating,
         and if so do.
@@ -213,21 +215,25 @@ class FederationHandler:
             limit: The number of events that the pagination request will
                 return. This is used as part of the heuristic to decide if we
                 should back paginate.
+            record_time: Whether to record the time it takes to backfill.
 
         Returns:
             True if we actually tried to backfill something, otherwise False.
         """
         # Starting the processing time here so we can include the room backfill
         # linearizer lock queue in the timing
-        processing_start_time = self.clock.time_msec()
+        processing_start_time = self.clock.time_msec() if record_time else 0
 
-        async with self._room_backfill.queue(room_id):
-            return await self._maybe_backfill_inner(
-                room_id,
-                current_depth,
-                limit,
-                processing_start_time=processing_start_time,
-            )
+        async with self._worker_locks.acquire_read_write_lock(
+            PURGE_PAGINATION_LOCK_NAME, room_id, write=False
+        ):
+            return await self._maybe_backfill_inner(
+                room_id,
+                current_depth,
+                limit,
+                processing_start_time=processing_start_time,
+            )
 
     @trace
     @tag_args
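The `record_time` flag keeps the nested, backgrounded re-invocation from starting a second timing observation, so the recorded duration reflects only the outermost call rather than a misleadingly small inner slice. Schematically (toy function, with synchronous recursion standing in for the background re-run):

import time

def maybe_backfill(depth: int, record_time: bool = True) -> None:
    # Mirrors `processing_start_time = self.clock.time_msec() if record_time else 0`:
    # only the outermost call starts the clock.
    start = time.monotonic() if record_time else 0.0
    if depth > 0:
        # The re-invocation passes record_time=False, as in the hunks below.
        maybe_backfill(depth - 1, record_time=False)
    if record_time:
        print(f"backfill took {time.monotonic() - start:.6f}s")

maybe_backfill(2)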
@@ -305,12 +311,21 @@ class FederationHandler:
         # of history that extends all the way back to where we are currently paginating
         # and it's within the 100 events that are returned from `/backfill`.
         if not sorted_backfill_points and current_depth != MAX_DEPTH:
+            # Check that we actually have later backfill points, if not just return.
+            have_later_backfill_points = await self.store.get_backfill_points_in_room(
+                room_id=room_id,
+                current_depth=MAX_DEPTH,
+                limit=1,
+            )
+            if not have_later_backfill_points:
+                return False
+
             logger.debug(
                 "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
             )
             run_as_background_process(
                 "_maybe_backfill_inner_anyway_with_max_depth",
-                self._maybe_backfill_inner,
+                self.maybe_backfill,
                 room_id=room_id,
                 # We use `MAX_DEPTH` so that we find all backfill points next
                 # time (all events are below the `MAX_DEPTH`)
@@ -319,7 +334,7 @@ class FederationHandler:
             # We don't want to start another timing observation from this
             # nested recursive call. The top-most call can record the time
             # overall otherwise the smaller one will throw off the results.
-            processing_start_time=None,
+            record_time=False,
         )
         # We return `False` because we're backfilling in the background and there is
         # no new events immediately for the caller to know about yet.

@@ -66,14 +66,12 @@ class IdentityHandler:
         self._3pid_validation_ratelimiter_ip = Ratelimiter(
             store=self.store,
             clock=hs.get_clock(),
-            rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second,
-            burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count,
+            cfg=hs.config.ratelimiting.rc_3pid_validation,
         )
         self._3pid_validation_ratelimiter_address = Ratelimiter(
             store=self.store,
             clock=hs.get_clock(),
-            rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second,
-            burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count,
+            cfg=hs.config.ratelimiting.rc_3pid_validation,
         )
 
     async def ratelimit_request_token_requests(

Some files were not shown because too many files have changed in this diff.