Compare commits: `release-v1...anoa/debug` (126 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 7cf88d743e | |
| | 2f83b4f206 | |
| | 0f8e3337ca | |
| | 27efb8a38f | |
| | d98a43d922 | |
| | 0a5f4f7665 | |
| | f0a860908b | |
| | 9c462f18a4 | |
| | 4f5bccbbba | |
| | 01a45869f0 | |
| | ca5d5de79b | |
| | a51b0862a1 | |
| | 8fe1fd906a | |
| | 90ad836ed8 | |
| | 5eb3fd785b | |
| | 7cbb2a00d1 | |
| | a4102d2a5f | |
| | 190c990a76 | |
| | b7695ac388 | |
| | 1fb5a7ad5d | |
| | fa2c116bef | |
| | e02f4b7de2 | |
| | 21407c6709 | |
| | ae55cc1e6b | |
| | 0c6142c4a1 | |
| | fee0195b27 | |
| | 76b2218599 | |
| | ea4ece3fcc | |
| | 68b2611783 | |
| | a719b703d9 | |
| | a461f1f846 | |
| | f9f3e89354 | |
| | f98f4f2e16 | |
| | 58f8305114 | |
| | 96529c4236 | |
| | 6dc019d9dd | |
| | 8d2a5586f7 | |
| | 76e392b0fa | |
| | d4ea465496 | |
| | 8ebfd577e2 | |
| | dbee081d14 | |
| | 99b7b801c3 | |
| | 641ff9ef7e | |
| | 05f8dada8b | |
| | 654902a758 | |
| | 4a711bf379 | |
| | fc566cdf0a | |
| | 3b6208b835 | |
| | 3b8348b06e | |
| | 5c7364fea5 | |
| | f08d05dd2c | |
| | e1fa42249c | |
| | fc1e534e41 | |
| | 835174180b | |
| | fd44053b84 | |
| | ad52db3b5c | |
| | 67f9e5293e | |
| | 19796e20aa | |
| | 40a3583ba1 | |
| | cb6e2c6cc7 | |
| | 8e8431bc6e | |
| | 69699a9bd1 | |
| | 6d81aec09f | |
| | e625c3dca0 | |
| | 199c270947 | |
| | 1c802de626 | |
| | c692283751 | |
| | 43ee5d5bac | |
| | 1768dd3c27 | |
| | 0d522b58a6 | |
| | b0e66721a5 | |
| | 6396527015 | |
| | d2f46ae370 | |
| | 85e0541db1 | |
| | cba2df20b5 | |
| | 8d3656b994 | |
| | 20ae617d14 | |
| | 2cacd0849a | |
| | 204b66c203 | |
| | 5bdf01fccd | |
| | 36c6b92bfc | |
| | 8eb7bb975e | |
| | 3bdb9b07fd | |
| | 0371a354cf | |
| | ae391db777 | |
| | d7fc87d973 | |
| | 224ef0b669 | |
| | a4243183f0 | |
| | 92014fbf72 | |
| | 4ccfa16081 | |
| | 7c7bd9898b | |
| | b516d91999 | |
| | 2328e90fbb | |
| | 5e82b07d2c | |
| | c9bf644fa0 | |
| | a704a35dd7 | |
| | e55a9b3e41 | |
| | 6774f265b4 | |
| | 6e731e86bf | |
| | c971698bff | |
| | 7477f43fd8 | |
| | 3710fea19d | |
| | df8c8a4f45 | |
| | 8a529e4fb6 | |
| | f25b0f8808 | |
| | 677272caed | |
| | 2481b7dfa4 | |
| | f19dd39dfc | |
| | b07b14b494 | |
| | 561d06b481 | |
| | 39d131b016 | |
| | ce857c05d5 | |
| | cc780b3f77 | |
| | 4cf9f92f39 | |
| | 95a96b21eb | |
| | c303eca8cc | |
| | c8e81898b6 | |
| | 861752b3aa | |
| | 670d590f8a | |
| | 07d7cbfe69 | |
| | cd8b73aa97 | |
| | 53aa26eddc | |
| | a587de96b8 | |
| | 411ba44790 | |
| | aea94ca8cd | |
| | 9345361c6b | |
```diff
@@ -29,11 +29,12 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 
 # First calculate the various trial jobs.
 #
-# For each type of test we only run on Py3.7 on PRs
+# For PRs, we only run each type of test with the oldest Python version supported (which
+# is Python 3.8 right now)
 
 trial_sqlite_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "sqlite",
         "extras": "all",
     }
@@ -46,13 +47,13 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.8", "3.9", "3.10", "3.11")
+        for version in ("3.9", "3.10", "3.11")
     )
 
 
 trial_postgres_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "postgres",
         "postgres-version": "11",
         "extras": "all",
@@ -71,7 +72,7 @@ if not IS_PR:
 
 trial_no_extra_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "sqlite",
         "extras": "",
     }
@@ -133,11 +134,6 @@ if not IS_PR:
             "sytest-tag": "testing",
             "postgres": "postgres",
         },
-        {
-            "sytest-tag": "buster",
-            "postgres": "multi-postgres",
-            "workers": "workers",
-        },
     ]
 )
 
```
.github/workflows/docker.yml (14 changes, vendored)
```diff
@@ -29,6 +29,16 @@ jobs:
       - name: Inspect builder
         run: docker buildx inspect
 
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Extract version from pyproject.toml
+        # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
+        # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
+        shell: bash
+        run: |
+          echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
+
       - name: Log in to DockerHub
         uses: docker/login-action@v2
         with:
@@ -61,7 +71,9 @@ jobs:
         uses: docker/build-push-action@v4
         with:
           push: true
-          labels: "gitsha1=${{ github.sha }}"
+          labels: |
+            gitsha1=${{ github.sha }}
+            org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
```
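As a quick sanity check of the version-extraction pipeline added above, the following hypothetical snippet stands the `echo` in for `grep "^version" pyproject.toml` and reuses the workflow's exact `sed` expression:

```sh
# Hypothetical input standing in for the `version` line of a real pyproject.toml;
# the sed expression is the one used in the workflow step above.
echo 'version = "1.89.0"' | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/'
# prints: 1.89.0
```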
.github/workflows/release-artifacts.yml (2 changes, vendored)
```diff
@@ -144,7 +144,7 @@ jobs:
 
       - name: Only build a single wheel on PR
         if: startsWith(github.ref, 'refs/pull/')
-        run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+        run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
 
       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
```
.github/workflows/tests.yml (6 changes, vendored)
```diff
@@ -320,7 +320,7 @@ jobs:
 
       - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.8'
 
       - name: Prepare old deps
         if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -362,7 +362,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["pypy-3.7"]
+        python-version: ["pypy-3.8"]
         extras: ["all"]
 
     steps:
@@ -477,7 +477,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - python-version: "3.7"
+          - python-version: "3.8"
            postgres-version: "11"
 
          - python-version: "3.11"
```
.github/workflows/twisted_trunk.yml (6 changes, vendored)
```diff
@@ -96,7 +96,11 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     container:
-      image: matrixdotorg/sytest-synapse:buster
+      # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
+      # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
+      # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
+      # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
+      image: matrixdotorg/sytest-synapse:focal
       volumes:
         - ${{ github.workspace }}:/src
```
.gitignore (1 change, vendored)
```diff
@@ -34,6 +34,7 @@ __pycache__/
 /logs
 /media_store/
 /uploads
+/homeserver-config-overrides.d
 
 # For direnv users
 /.envrc
```
CHANGES.md (127 changes)
```diff
@@ -1,3 +1,130 @@
+# Synapse 1.89.0 (2023-08-01)
+
+No significant changes since 1.89.0rc1.
+
+
+# Synapse 1.89.0rc1 (2023-07-25)
+
+### Features
+
+- Add Unix Socket support for HTTP Replication Listeners. [Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924))
+- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). ([\#15911](https://github.com/matrix-org/synapse/issues/15911))
+- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912))
+- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969))
+- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))
+
+### Bugfixes
+
+- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
+- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887))
+- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925))
+- Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957))
+- Ensure a long state res does not starve CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960))
+- Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973))
+- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))
+
+### Improved Documentation
+
+- Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921))
+- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938))
+- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981))
+
+### Deprecations and Removals
+
+- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928))
+
+### Internal Changes
+
+- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884))
+- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909))
+- Add details to warning in log when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922))
+- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926))
+- Fix bug with read/write lock implementation. This is currently unused so has no observable effects. ([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958))
+- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940))
+- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952))
+- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955))
+- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961))
+- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968))
+- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971))
+
+### Updates to locked dependencies
+
+* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949))
+* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984))
+* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943))
+* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948))
+* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986))
+* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945))
+* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946))
+* Bump pygithub from 1.58.2 to 1.59.0. ([\#15834](https://github.com/matrix-org/synapse/issues/15834))
+* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951))
+* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985))
+* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950))
+* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932))
+* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983))
+* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947))
+
+
+# Synapse 1.88.0 (2023-07-18)
+
+This release
+- raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and
+- removes deprecated config options related to worker deployment.
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information.
+
+
+### Bugfixes
+
+- Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters`", which was introduced in Synapse 1.88.0rc1. ([\#15953](https://github.com/matrix-org/synapse/issues/15953))
+
+
+# Synapse 1.88.0rc1 (2023-07-11)
+
+### Features
+
+- Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844))
+
+### Bugfixes
+
+- Pin `pydantic` to `^=1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release.
+  Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862))
+- Correctly resize thumbnails with pillow version >=10. ([\#15876](https://github.com/matrix-org/synapse/issues/15876))
+
+### Improved Documentation
+
+- Fixed header levels on the [Admin API "Users"](https://matrix-org.github.io/synapse/v1.87/admin_api/user_admin_api.html) documentation page. Contributed by @sumnerevans at @beeper. ([\#15852](https://github.com/matrix-org/synapse/issues/15852))
+- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15872](https://github.com/matrix-org/synapse/issues/15872))
+
+### Deprecations and Removals
+
+- **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860))
+- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893), [\#15917](https://github.com/matrix-org/synapse/pull/15917))
+
+### Internal Changes
+
+- Add foreign key constraint to `event_forward_extremities`. ([\#15751](https://github.com/matrix-org/synapse/issues/15751), [\#15907](https://github.com/matrix-org/synapse/issues/15907))
+- Add read/write style cross-worker locks. ([\#15782](https://github.com/matrix-org/synapse/issues/15782))
+- Stop writing to column `user_id` of tables `profiles` and `user_filters`. ([\#15787](https://github.com/matrix-org/synapse/issues/15787))
+- Use lower isolation level when cleaning old presence stream data to avoid serialization errors. ([\#15826](https://github.com/matrix-org/synapse/issues/15826))
+- Add tracing to media `/upload` code paths. ([\#15850](https://github.com/matrix-org/synapse/issues/15850), [\#15888](https://github.com/matrix-org/synapse/issues/15888))
+- Add a timeout that aborts any Postgres statement taking more than 1 hour. ([\#15853](https://github.com/matrix-org/synapse/issues/15853))
+- Fix the `devenv up` configuration which was ignoring the config overrides. ([\#15854](https://github.com/matrix-org/synapse/issues/15854))
+- Optimised cleanup of old entries in `device_lists_stream`. ([\#15861](https://github.com/matrix-org/synapse/issues/15861))
+- Update the Matrix clients link in the _It works! Synapse is running_ landing page. ([\#15874](https://github.com/matrix-org/synapse/issues/15874))
+- Fix building Synapse with the nightly Rust compiler. ([\#15906](https://github.com/matrix-org/synapse/issues/15906))
+- Add `Server` to Access-Control-Expose-Headers header. ([\#15908](https://github.com/matrix-org/synapse/issues/15908))
+
+### Updates to locked dependencies
+
+* Bump authlib from 1.2.0 to 1.2.1. ([\#15864](https://github.com/matrix-org/synapse/issues/15864))
+* Bump importlib-metadata from 6.6.0 to 6.7.0. ([\#15865](https://github.com/matrix-org/synapse/issues/15865))
+* Bump lxml from 4.9.2 to 4.9.3. ([\#15897](https://github.com/matrix-org/synapse/issues/15897))
+* Bump regex from 1.8.4 to 1.9.1. ([\#15902](https://github.com/matrix-org/synapse/issues/15902))
+* Bump ruff from 0.0.275 to 0.0.277. ([\#15900](https://github.com/matrix-org/synapse/issues/15900))
+* Bump sentry-sdk from 1.25.1 to 1.26.0. ([\#15867](https://github.com/matrix-org/synapse/issues/15867))
+* Bump serde_json from 1.0.99 to 1.0.100. ([\#15901](https://github.com/matrix-org/synapse/issues/15901))
+* Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1. ([\#15866](https://github.com/matrix-org/synapse/issues/15866))
+
 # Synapse 1.87.0 (2023-07-04)
 
 Please note that this will be the last release of Synapse that is compatible with
```
Cargo.lock (54 changes, generated)
```diff
@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.71"
+version = "1.0.72"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
+checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
 
 [[package]]
 name = "arc-swap"
@@ -182,9 +182,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.52"
+version = "1.0.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
 dependencies = [
  "unicode-ident",
 ]
@@ -229,9 +229,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-log"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444"
+checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
 dependencies = [
  "arc-swap",
  "log",
@@ -273,9 +273,9 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "1.0.26"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
 dependencies = [
  "proc-macro2",
 ]
@@ -291,9 +291,21 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.8.4"
+version = "1.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
+checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
 dependencies = [
  "aho-corasick",
  "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
+dependencies = [
+ "aho-corasick",
+ "memchr",
@@ -302,9 +314,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
+checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"
 
 [[package]]
 name = "ryu"
@@ -320,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.164"
+version = "1.0.179"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
+checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.164"
+version = "1.0.179"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
+checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.10",
+ "syn 2.0.25",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.99"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
+checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
 dependencies = [
  "itoa",
  "ryu",
@@ -374,9 +386,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.10"
+version = "2.0.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
+checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
 dependencies = [
  "proc-macro2",
  "quote",
```
```diff
@@ -3,3 +3,4 @@
 
 [workspace]
 members = ["rust"]
+resolver = "2"
```
book.toml (12 changes)
```diff
@@ -34,14 +34,6 @@ additional-css = [
     "docs/website_files/table-of-contents.css",
     "docs/website_files/remove-nav-buttons.css",
     "docs/website_files/indent-section-headers.css",
-    "docs/website_files/version-picker.css",
 ]
-additional-js = [
-    "docs/website_files/table-of-contents.js",
-    "docs/website_files/version-picker.js",
-    "docs/website_files/version.js",
-]
-theme = "docs/website_files/theme"
-
-[preprocessor.schema_versions]
-command = "./scripts-dev/schema_versions.py"
+additional-js = ["docs/website_files/table-of-contents.js"]
+theme = "docs/website_files/theme"
```
changelog.d/15525.misc (new file)
```diff
@@ -0,0 +1 @@
+Update SQL queries to inline boolean parameters as supported in SQLite 3.27.
```
changelog.d/15629.feature (new file)
```diff
@@ -0,0 +1 @@
+Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)).
```
changelog.d/15754.misc (new file)
```diff
@@ -0,0 +1 @@
+Allow for the configuration of the backoff algorithm for federation destinations.
```
changelog.d/15791.bugfix (new file)
```diff
@@ -0,0 +1 @@
+Fix bug where purging history and paginating simultaneously could lead to database corruption when using workers.
```
changelog.d/15964.removal (new file)
```diff
@@ -0,0 +1 @@
+Remove support for legacy application service paths.
```
changelog.d/15972.docker (new file)
```diff
@@ -0,0 +1 @@
+Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa.
```
changelog.d/15991.misc (new file)
```diff
@@ -0,0 +1 @@
+Allow modules to check whether the current worker is configured to run background tasks.
```
changelog.d/15992.misc (new file)
```diff
@@ -0,0 +1 @@
+Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC.
```
changelog.d/16009.docker (new file)
```diff
@@ -0,0 +1 @@
+Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa.
```
changelog.d/16011.misc (new file)
```diff
@@ -0,0 +1 @@
+Update PyYAML to 6.0.1.
```
changelog.d/16012.bugfix (new file)
```diff
@@ -0,0 +1 @@
+Fix 404 not found code returned on profile endpoint when the display name is empty but not the avatar URL.
```
changelog.d/16013.misc (new file)
```diff
@@ -0,0 +1 @@
+Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10.
```
changelog.d/16016.doc (new file)
```diff
@@ -0,0 +1,2 @@
+Clarify comment on the keys/upload over replication endpoint.
+
```
changelog.d/16017.removal (new file)
```diff
@@ -0,0 +1 @@
+Move support for application service query parameter authorization behind a configuration option.
```
changelog.d/16019.misc (new file)
```diff
@@ -0,0 +1 @@
+Fix building the nix development environment on MacOS systems.
```
changelog.d/16023.misc (new file)
```diff
@@ -0,0 +1 @@
+Combine duplicated code.
```
changelog.d/16027.doc (new file)
```diff
@@ -0,0 +1 @@
+Do not expose Admin API in caddy reverse proxy example. Contributed by @NilsIrl.
```
changelog.d/16028.misc (new file)
```diff
@@ -0,0 +1 @@
+Collect additional metrics from `ResponseCache` for eviction.
```
changelog.d/16031.bugfix (new file)
```diff
@@ -0,0 +1 @@
+Remove leading and trailing spaces when setting a display name.
```
changelog.d/16043.bugfix (new file)
```diff
@@ -0,0 +1 @@
+Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms.
```
changelog.d/16044.misc (new file)
```diff
@@ -0,0 +1 @@
+Update certifi to 2023.7.22 and pygments to 2.15.1.
```
changelog.d/16063.misc (new file)
```diff
@@ -0,0 +1 @@
+Fix building the nix development environment on MacOS systems.
```
```diff
@@ -63,7 +63,7 @@
           "uid": "${DS_PROMETHEUS}"
         },
         "enable": true,
-        "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
+        "expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
         "iconColor": "purple",
         "name": "deploys",
         "titleFormat": "Deployed {{version}}"
```
debian/changelog (24 changes, vendored)
```diff
@@ -1,3 +1,27 @@
+matrix-synapse-py3 (1.89.0) stable; urgency=medium
+
+  * New Synapse release 1.89.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Aug 2023 11:07:15 +0100
+
+matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.89.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Jul 2023 14:31:07 +0200
+
+matrix-synapse-py3 (1.88.0) stable; urgency=medium
+
+  * New Synapse release 1.88.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jul 2023 13:59:28 +0100
+
+matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.88.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jul 2023 10:20:19 +0100
+
 matrix-synapse-py3 (1.87.0) stable; urgency=medium
 
   * New Synapse release 1.87.0.
```
```diff
@@ -28,12 +28,12 @@ FROM docker.io/library/${distro} as builder
 
 RUN apt-get update -qq -o Acquire::Languages=none
 RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-  -yqq --no-install-recommends \
-  build-essential \
-  ca-certificates \
-  devscripts \
-  equivs \
-  wget
+    -yqq --no-install-recommends \
+    build-essential \
+    ca-certificates \
+    devscripts \
+    equivs \
+    wget
 
 # fetch and unpack the package
 # We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -62,33 +62,29 @@ FROM docker.io/library/${distro}
 ARG distro=""
 ENV distro ${distro}
 
 # Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
 # http://bugs.python.org/issue19846
 ENV LANG C.UTF-8
 
 # Install the build dependencies
 #
 # NB: keep this list in sync with the list of build-deps in debian/control
 # TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
-  -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
-  build-essential \
-  curl \
-  debhelper \
-  devscripts \
-  libsystemd-dev \
-  lsb-release \
-  pkg-config \
-  python3-dev \
-  python3-pip \
-  python3-setuptools \
-  python3-venv \
-  sqlite3 \
-  libpq-dev \
-  libicu-dev \
-  pkg-config \
-  xmlsec1
+    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+    build-essential \
+    curl \
+    debhelper \
+    devscripts \
+    libsystemd-dev \
+    lsb-release \
+    pkg-config \
+    python3-dev \
+    python3-pip \
+    python3-setuptools \
+    python3-venv \
+    sqlite3 \
+    libpq-dev \
+    libicu-dev \
+    pkg-config \
+    xmlsec1
 
 # Install rust and ensure it's in the PATH
 ENV RUSTUP_HOME=/rust
```
```diff
@@ -35,7 +35,11 @@ server {
 
     # Send all other traffic to the main process
     location ~* ^(\\/_matrix|\\/_synapse) {
+{% if using_unix_sockets %}
+        proxy_pass http://unix:/run/main_public.sock;
+{% else %}
         proxy_pass http://localhost:8080;
+{% endif %}
         proxy_set_header X-Forwarded-For $remote_addr;
         proxy_set_header X-Forwarded-Proto $scheme;
         proxy_set_header Host $host;
```
```diff
@@ -6,6 +6,9 @@
 {% if enable_redis %}
 redis:
     enabled: true
+{% if using_unix_sockets %}
+    path: /tmp/redis.sock
+{% endif %}
 {% endif %}
 
 {% if appservice_registrations is not none %}
```
```diff
@@ -19,7 +19,11 @@ username=www-data
 autorestart=true
 
 [program:redis]
+{% if using_unix_sockets %}
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
+{% else %}
 command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
+{% endif %}
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
```
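With the template rendered in Unix-socket mode, Redis listens on `/tmp/redis.sock`. A minimal sketch of verifying that from inside the container (assuming `redis-cli` is available there):

```sh
# redis-cli's -s flag targets a Unix socket instead of host:port.
redis-cli -s /tmp/redis.sock ping
# expected reply: PONG
```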
```diff
@@ -8,7 +8,11 @@ worker_name: "{{ name }}"
 
 worker_listeners:
   - type: http
+{% if using_unix_sockets %}
+    path: "/run/worker.{{ port }}"
+{% else %}
     port: {{ port }}
+{% endif %}
 {% if listener_resources %}
     resources:
       - names:
```
```diff
@@ -36,12 +36,17 @@ listeners:
 
   # Allow configuring in case we want to reverse proxy 8008
   # using another process in the same container
+{% if SYNAPSE_USE_UNIX_SOCKET %}
+  # Unix sockets don't care about TLS or IP addresses or ports
+  - path: '/run/main_public.sock'
+    type: http
+{% else %}
   - port: {{ SYNAPSE_HTTP_PORT or 8008 }}
     tls: false
     bind_addresses: ['::']
     type: http
     x_forwarded: false
 
+{% endif %}
     resources:
       - names: [client]
         compress: true
@@ -57,8 +62,11 @@ database:
     user: "{{ POSTGRES_USER or "synapse" }}"
     password: "{{ POSTGRES_PASSWORD }}"
     database: "{{ POSTGRES_DB or "synapse" }}"
+{% if not SYNAPSE_USE_UNIX_SOCKET %}
+    {# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
     host: "{{ POSTGRES_HOST or "db" }}"
     port: "{{ POSTGRES_PORT or "5432" }}"
+{% endif %}
     cp_min: 5
     cp_max: 10
 {% else %}
```
```diff
@@ -74,6 +74,9 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 MAIN_PROCESS_INSTANCE_NAME = "main"
 MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
 MAIN_PROCESS_REPLICATION_PORT = 9093
+# Obviously, these would only be used with the UNIX socket option
+MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
+MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"
 
 # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
 # during processing with the name of the worker.
@@ -407,11 +410,15 @@ def add_worker_roles_to_shared_config(
         )
 
         # Map of stream writer instance names to host/ports combos
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
-
+        if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+            instance_map[worker_name] = {
+                "path": f"/run/worker.{worker_port}",
+            }
+        else:
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }
     # Update the list of stream writers. It's convenient that the name of the worker
     # type is the same as the stream to write. Iterate over the whole list in case there
     # is more than one.
@@ -423,10 +430,15 @@ def add_worker_roles_to_shared_config(
 
         # Map of stream writer instance names to host/ports combos
         # For now, all stream writers need http replication ports
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+        if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+            instance_map[worker_name] = {
+                "path": f"/run/worker.{worker_port}",
+            }
+        else:
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }
 
 
 def merge_worker_template_configs(
@@ -718,17 +730,29 @@ def generate_worker_files(
     # Note that yaml cares about indentation, so care should be taken to insert lines
    # into files at the correct indentation below.
 
+    # Convenience helper for if using unix sockets instead of host:port
+    using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
     # First read the original config file and extract the listeners block. Then we'll
     # add another listener for replication. Later we'll write out the result to the
     # shared config file.
-    listeners = [
-        {
-            "port": MAIN_PROCESS_REPLICATION_PORT,
-            "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
-            "type": "http",
-            "resources": [{"names": ["replication"]}],
-        }
-    ]
+    listeners: List[Any]
+    if using_unix_sockets:
+        listeners = [
+            {
+                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+                "type": "http",
+                "resources": [{"names": ["replication"]}],
+            }
+        ]
+    else:
+        listeners = [
+            {
+                "port": MAIN_PROCESS_REPLICATION_PORT,
+                "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
+                "type": "http",
+                "resources": [{"names": ["replication"]}],
+            }
+        ]
     with open(config_path) as file_stream:
         original_config = yaml.safe_load(file_stream)
         original_listeners = original_config.get("listeners")
@@ -769,7 +793,17 @@ def generate_worker_files(
 
     # A list of internal endpoints to healthcheck, starting with the main process
     # which exists even if no workers do.
-    healthcheck_urls = ["http://localhost:8080/health"]
+    # This list ends up being part of the command line to curl, (curl added support for
+    # Unix sockets in version 7.40).
+    if using_unix_sockets:
+        healthcheck_urls = [
+            f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
+            # The scheme and hostname from the following URL are ignored.
+            # The only thing that matters is the path `/health`
+            "http://localhost/health"
+        ]
+    else:
+        healthcheck_urls = ["http://localhost:8080/health"]
 
     # Get the set of all worker types that we have configured
     all_worker_types_in_use = set(chain(*requested_worker_types.values()))
@@ -806,8 +840,12 @@ def generate_worker_files(
         # given worker_type needs to stay assigned and not be replaced.
         worker_config["shared_extra_conf"].update(shared_config)
         shared_config = worker_config["shared_extra_conf"]
 
-        healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+        if using_unix_sockets:
+            healthcheck_urls.append(
+                f"--unix-socket /run/worker.{worker_port} http://localhost/health"
+            )
+        else:
+            healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
 
         # Update the shared config with sharding-related options if necessary
         add_worker_roles_to_shared_config(
@@ -826,6 +864,7 @@ def generate_worker_files(
             "/conf/workers/{name}.yaml".format(name=worker_name),
             **worker_config,
             worker_log_config_filepath=log_config_filepath,
+            using_unix_sockets=using_unix_sockets,
         )
 
         # Save this worker's port number to the correct nginx upstreams
@@ -846,8 +885,13 @@ def generate_worker_files(
     nginx_upstream_config = ""
     for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
         body = ""
-        for port in upstream_worker_ports:
-            body += f"    server localhost:{port};\n"
+        if using_unix_sockets:
+            for port in upstream_worker_ports:
+                body += f"    server unix:/run/worker.{port};\n"
+
+        else:
+            for port in upstream_worker_ports:
+                body += f"    server localhost:{port};\n"
 
         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -877,10 +921,15 @@ def generate_worker_files(
     # If there are workers, add the main process to the instance_map too.
     if workers_in_use:
         instance_map = shared_config.setdefault("instance_map", {})
-        instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
-            "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
-            "port": MAIN_PROCESS_REPLICATION_PORT,
-        }
+        if using_unix_sockets:
+            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+            }
+        else:
+            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+                "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
+                "port": MAIN_PROCESS_REPLICATION_PORT,
+            }
 
     # Shared homeserver config
     convert(
@@ -890,6 +939,7 @@ def generate_worker_files(
         appservice_registrations=appservice_registrations,
         enable_redis=workers_in_use,
         workers_in_use=workers_in_use,
+        using_unix_sockets=using_unix_sockets,
     )
 
     # Nginx config
@@ -900,6 +950,7 @@ def generate_worker_files(
         upstream_directives=nginx_upstream_config,
         tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
         tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
+        using_unix_sockets=using_unix_sockets,
     )
 
     # Supervisord config
@@ -909,6 +960,7 @@ def generate_worker_files(
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
         enable_redis=workers_in_use,
+        using_unix_sockets=using_unix_sockets,
     )
 
     convert(
```
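The healthcheck entries built above end up on a curl command line. As a rough illustration (assuming curl ≥ 7.40, per the comment in the diff, and the main-process socket path defined there), a single check looks like:

```sh
# Check the main process over its public Unix socket; the scheme and hostname
# in the URL are ignored, only the /health path matters (see the comment above).
curl --unix-socket /run/main_public.sock http://localhost/health
```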
```diff
@@ -242,6 +242,9 @@ The following parameters should be set in the URL:
 
 - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
   Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+- `not_user_type` - Exclude certain user types, such as bot users, from the request.
+  Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
+  "empty string" here means to exclude users without a type.
 
 Caution. The database only has indexes on the columns `name` and `creation_ts`.
 This means that if a different sort order is used (`is_guest`, `admin`,
@@ -729,7 +732,8 @@ POST /_synapse/admin/v1/users/<user_id>/login
 
 An optional `valid_until_ms` field can be specified in the request body as an
 integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
+do not expire. Note that this API does not allow a user to login as themselves
+(to create more tokens).
 
 A response body like the following is returned:
 
@@ -1180,7 +1184,7 @@ The following parameters should be set in the URL:
 - `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
   be local.
 
-### Check username availability
+## Check username availability
 
 Checks to see if a username is available, and valid, for the server. See [the client-server
 API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1198,7 +1202,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
 The request and response format is the same as the
 [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
 
-### Find a user based on their ID in an auth provider
+## Find a user based on their ID in an auth provider
 
 The API is:
 
@@ -1237,7 +1241,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
 _Added in Synapse 1.68.0._
 
 
-### Find a user based on their Third Party ID (ThreePID or 3PID)
+## Find a user based on their Third Party ID (ThreePID or 3PID)
 
 The API is:
 
```
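For illustration only, a request excluding bot and support users via the repeatable `not_user_type` parameter documented above might look like this (the exact endpoint path, hostname, and token are assumptions, not part of the diff):

```sh
# `not_user_type` can be repeated, as documented above; the host and bearer
# token here are placeholders.
curl --header "Authorization: Bearer <admin_access_token>" \
  'https://homeserver.example/_synapse/admin/v2/users?not_user_type=bot&not_user_type=support'
```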
```diff
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
 (e.g. by using [rustup](https://rustup.rs/)).
 
 The oldest supported version of SQLite is the version
-[provided](https://packages.debian.org/buster/libsqlite3-0) by
+[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
 [Debian oldstable](https://wiki.debian.org/DebianOldStable).
 
 Context
```
````diff
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
 configuration:
 
 ```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
 ```
 (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
 
@@ -370,6 +370,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
   See the [worker documentation](../workers.md) for additional information on workers.
 - Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
 - Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
+- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).
 
 To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
 ```sh
````
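Combining the documented pieces, a hypothetical invocation that exercises the new Unix-socket mode against the focal image could look like this (paths must be absolute, per the note above; `-e` is just the standard docker flag for passing the documented environment variable):

```sh
# UNIX_SOCKETS=1 comes from the option list above; the image tag matches the updated docs.
docker run --rm -it -e UNIX_SOCKETS=1 \
  -v "$(realpath .)":/src:ro \
  -v "$(realpath ./logs)":/logs \
  matrixdotorg/sytest-synapse:focal
```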
````diff
@@ -95,7 +95,7 @@ matrix.example.com {
 }
 
 example.com:8448 {
-	reverse_proxy localhost:8008
+	reverse_proxy /_matrix/* localhost:8008
 }
 ```
````
```diff
@@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 repository
 
 #### ArchLinux
 
-The quickest way to get up and running with ArchLinux is probably with the community package
-<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
+The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
+<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
 the necessary dependencies.
 
 pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.7 or later, up to Python 3.11.
+- Python 3.8 or later, up to Python 3.11.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
 If building on an uncommon architecture for which pre-built wheels are
```
```diff
@@ -1,8 +1,4 @@
 worker_app: synapse.app.generic_worker
 worker_name: background_worker
 
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
 worker_log_config: /etc/matrix-synapse/background-worker-log.yaml
```
```diff
@@ -1,9 +1,5 @@
 worker_app: synapse.app.generic_worker
-worker_name: event_persister1
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
+worker_name: event_persister1
 
 worker_listeners:
   - type: http
```
```diff
@@ -1,8 +1,4 @@
 worker_app: synapse.app.federation_sender
 worker_name: federation_sender1
 
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
 worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
```
```diff
@@ -1,10 +1,6 @@
 worker_app: synapse.app.media_repository
 worker_name: media_worker
 
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
 worker_listeners:
   - type: http
     port: 8085
```
```diff
@@ -1,8 +1,4 @@
 worker_app: synapse.app.pusher
 worker_name: pusher_worker1
 
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
 worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
```
````diff
@@ -87,6 +87,57 @@ process, for example:
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
+
+# Upgrading to v1.90.0
+
+## App service query parameter authorization is now a configuration option
+
+Synapse v1.81.0 deprecated application service authorization via query parameters as this is
+considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
+[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
+query parameter authorization redundant. Since removing the ability to continue to use query parameters could break
+backwards compatibility it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
+This option defaults to false, but can be activated by adding
+```yaml
+use_appservice_legacy_authorization: true
+```
+to your configuration.
+
+# Upgrading to v1.89.0
+
+## Removal of unspecced `user` property for `/register`
+
+Application services can no longer call `/register` with a `user` property to create new users.
+The standard `username` property should be used instead. See the
+[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
+for more information.
+
+# Upgrading to v1.88.0
+
+## Minimum supported Python version
+
+The minimum supported Python version has been increased from v3.7 to v3.8.
+You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).
+
+If you use current versions of the Matrix.org-distributed Debian
+packages or Docker images, no action is required.
+
+## Removal of `worker_replication_*` settings
+
+As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
+are being removed in this release of Synapse:
+
+* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
+* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
+* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)
+
+Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
+(or create one if necessary). This is required if you have ***any*** workers at all;
+administrators of single-process (monolith) installations don't need to do anything.
+
+For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
+
+
 # Upgrading to v1.86.0
 
 ## Minimum supported Rust version
````
````diff
@@ -462,6 +462,20 @@ See the docs [request log format](../administration/request_log.md).
 * `additional_resources`: Only valid for an 'http' listener. A map of
   additional endpoints which should be loaded via dynamic modules.
 
+Unix socket support (_Added in Synapse 1.89.0_):
+* `path`: A path and filename for a Unix socket. Make sure it is located in a
+  directory with read and write permissions, and that it already exists (the directory
+  will not be created). Defaults to `None`.
+  * **Note**: The use of both `path` and `port` options for the same `listener` is not
+    compatible.
+  * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
+  * Other options that would not make sense to use with a UNIX socket, such as
+    `bind_addresses` and `tls` will be ignored and can be removed.
+* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`
+* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
+  Also make sure that `metrics` is not included in `resources` -> `names`
+
 
 Valid resource names are:
 
 * `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -474,7 +488,7 @@ Valid resource names are:
 
 * `media`: the media API (/_matrix/media).
 
-* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
+* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)
 
 * `openid`: OpenID authentication. See [here](../../openid.md).
 
@@ -533,6 +547,22 @@ listeners:
     bind_addresses: ['::1', '127.0.0.1']
     type: manhole
 ```
+Example configuration #3:
+```yaml
+listeners:
+  # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
+  # lightweight interprocess communication without TCP/IP overhead, avoid port
+  # conflicts, and providing enhanced security through system file permissions.
+  #
+  # Note that x_forwarded will default to true, when using a UNIX socket. Please see
+  # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
+  #
+  - path: /var/run/synapse/main_public.sock
+    type: http
+    resources:
+      - names: [client, federation]
+```
 
 ---
 ### `manhole_settings`
````
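A quick way to smoke-test a listener like example #3 above, assuming curl ≥ 7.40 and the socket path from that example:

```sh
# Hit the client API over the Unix socket; the scheme and host in the URL are
# ignored when --unix-socket is given, only the request path matters.
curl --unix-socket /var/run/synapse/main_public.sock http://localhost/_matrix/client/versions
```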
@@ -1212,6 +1242,14 @@ like sending a federation transaction.
|
||||
* `max_short_retries`: maximum number of retries for the short retry algo. Default to 3 attempts.
|
||||
* `max_long_retries`: maximum number of retries for the long retry algo. Default to 10 attempts.
|
||||
|
||||
The following options control the retry logic when communicating with a specific homeserver destination.
|
||||
Unlike the previous configuration options, these values apply across all requests
|
||||
for a given destination and the state of the backoff is stored in the database.
|
||||
|
||||
* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
|
||||
* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2.
|
||||
* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
federation:
|
||||
@@ -1220,6 +1258,9 @@ federation:
|
||||
max_long_retry_delay: 100s
|
||||
max_short_retries: 5
|
||||
max_long_retries: 20
|
||||
destination_min_retry_interval: 30s
|
||||
destination_retry_multiplier: 5
|
||||
destination_max_retry_interval: 12h
|
||||
```
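To make the growth of the destination backoff concrete, here is a minimal sketch (plain Python, not Synapse code; the function name and millisecond units are assumptions for illustration) of the schedule the three `destination_*` options produce:
```python
def destination_backoff_schedule(
    min_interval_ms: int = 10 * 60 * 1000,           # destination_min_retry_interval: 10m
    multiplier: int = 2,                             # destination_retry_multiplier
    max_interval_ms: int = 7 * 24 * 60 * 60 * 1000,  # destination_max_retry_interval: a week
    failures: int = 6,
) -> list:
    """Successive waits (in ms) applied after each consecutive failed request."""
    schedule, interval = [], min_interval_ms
    for _ in range(failures):
        schedule.append(interval)
        # Multiply the backoff on each failure, never exceeding the cap.
        interval = min(interval * multiplier, max_interval_ms)
    return schedule

# With the defaults the waits double each time, capped at one week:
print([ms // 60000 for ms in destination_backoff_schedule()])  # [10, 20, 40, 80, 160, 320] minutes
```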
---
## Caching

@@ -2807,6 +2848,20 @@ Example configuration:
```yaml
track_appservice_user_ips: true
```
---
### `use_appservice_legacy_authorization`

Whether to send the application service access tokens via the `access_token` query parameter
per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
access tokens via a query parameter.

**Enabling this option is considered insecure and is not recommended.**

Example configuration:
```yaml
use_appservice_legacy_authorization: true
```
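As a rough sketch of what this option toggles (the token value and appservice URL below are made up for illustration), legacy mode appends the token to the query string, where it can leak into URLs and logs, while the default sends it as a bearer token in a header:
```python
AS_URL = "https://appservice.example.com/_matrix/app/v1/transactions/42"  # hypothetical endpoint
HS_TO_AS_TOKEN = "hs_to_as_token"  # hypothetical secret

# use_appservice_legacy_authorization: true -- token travels in the URL:
legacy_url = f"{AS_URL}?access_token={HS_TO_AS_TOKEN}"

# default (false) -- token travels in a header instead:
headers = {"Authorization": f"Bearer {HS_TO_AS_TOKEN}"}
```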

---
### `macaroon_secret_key`

@@ -3930,13 +3985,14 @@ federation_sender_instances:
---
### `instance_map`

When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured, and to the main process.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
The main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is declared
inside the `listener` block for a `replication` listener.
When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
replication listener of the worker, if configured, and to the main process. Each worker
declared under [`stream_writers`](../../workers.md#stream-writers) and
[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP
replication listener, and that listener should be included in the `instance_map`. The
main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is
declared inside the `listener` block for a `replication` listener.


Example configuration:
@@ -3949,6 +4005,14 @@ instance_map:
    host: localhost
    port: 8034
```
Example configuration (#2, for UNIX sockets):
```yaml
instance_map:
  main:
    path: /var/run/synapse/main_replication.sock
  worker1:
    path: /var/run/synapse/worker1_replication.sock
```
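Because every worker named under `stream_writers` or `outbound_federation_restricted_to` must have an `instance_map` entry, a small self-check of the shared configuration can catch omissions before startup. A minimal sketch, assuming the shared config is a single `homeserver.yaml` and that `pyyaml` is available (both assumptions for illustration):
```python
import yaml  # assumption: pyyaml is installed

with open("homeserver.yaml") as f:  # assumption: shared config lives here
    config = yaml.safe_load(f) or {}

instance_map = config.get("instance_map") or {}

# Workers that the documentation above says must appear in the instance_map.
required = set()
for assignee in (config.get("stream_writers") or {}).values():
    # A stream may be assigned a single worker name or a list of them.
    required.update([assignee] if isinstance(assignee, str) else assignee)
required.update(config.get("outbound_federation_restricted_to") or [])

missing = required - set(instance_map)
if missing:
    raise SystemExit(f"instance_map is missing entries for: {sorted(missing)}")
if required and "main" not in instance_map:
    raise SystemExit("instance_map needs a 'main' entry when other workers exist")
print("instance_map looks consistent")
```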
---
### `stream_writers`

@@ -3966,6 +4030,24 @@ stream_writers:
  typing: worker1
```
---
### `outbound_federation_restricted_to`

When using workers, you can restrict outbound federation traffic to only go through a
specific subset of workers. Any worker specified here must also be in the
[`instance_map`](#instance_map).
[`worker_replication_secret`](#worker_replication_secret) must also be configured to
authorize inter-worker communication.

```yaml
outbound_federation_restricted_to:
  - federation_sender1
  - federation_sender2
```

Also see the [worker
documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
for more info.
---
### `run_background_tasks_on`

The [worker](../../workers.md#background-tasks) that is used to run
@@ -4090,51 +4172,6 @@ Example configuration:
worker_name: generic_worker1
```
---
### `worker_replication_host`
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

The HTTP replication endpoint that the worker should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

The HTTP replication port that the worker should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.

**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).

Defaults to `false`.

*Added in Synapse 1.72.0.*

Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
@@ -4153,6 +4190,18 @@ worker_listeners:
    resources:
      - names: [client, federation]
```
Example configuration (#2, using UNIX sockets with a `replication` listener):
```yaml
worker_listeners:
  - type: http
    path: /var/run/synapse/worker_public.sock
    resources:
      - names: [client, federation]
  - type: http
    path: /var/run/synapse/worker_replication.sock
    resources:
      - names: [replication]
```
---
### `worker_manhole`


@@ -24,11 +24,6 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting them
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between the documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and
@@ -131,18 +131,6 @@
          <i class="fa fa-search"></i>
        </button>
        {{/if}}
        <div class="version-picker">
          <div class="dropdown">
            <div class="select">
              <span></span>
              <i class="fa fa-chevron-down"></i>
            </div>
            <input type="hidden" name="version">
            <ul class="dropdown-menu">
              <!-- Versions will be added dynamically in version-picker.js -->
            </ul>
          </div>
        </div>
      </div>

      <h1 class="menu-title">{{ book_title }}</h1>
@@ -321,4 +309,4 @@
{{/if}}

</body>
</html>
</html>
@@ -1,78 +0,0 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
@@ -1,127 +0,0 @@
const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
};

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
@@ -1 +0,0 @@
window.SYNAPSE_VERSION = 'v1.87';
@@ -95,9 +95,12 @@ for the main process
* Secondly, you need to enable
  [redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
  with the `main` process defined, as well as the relevant connection information from
  its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
  is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
  with the `main` process defined, as well as the relevant connection information from
  its HTTP `replication` listener (defined in step 1 above).
  * Note that the `host` defined is the address the worker needs to look for the `main`
    process at, not necessarily the same address that is bound to.
  * If you are using Unix sockets for the `replication` resource, make sure to
    use a `path` to the socket file instead of a `port`.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
  can be used to authenticate HTTP traffic between workers. For example:

@@ -145,9 +148,6 @@ In the config file for each worker, you must specify:
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`.

For example:

@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from source

You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver -c [your homeserver.yaml]
poetry run synapse_homeserver --config-file [your homeserver.yaml]
```
For worker setups, you can run the following command:
```console
poetry run synapse_worker -c [your worker.yaml]
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
```
## Available worker applications

@@ -531,6 +531,30 @@ the stream writer for the `presence` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

#### Restrict outbound federation traffic to a specific set of workers

The
[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
configuration is useful to make sure outbound federation traffic only goes through a
specified subset of workers. This allows you to set stricter access controls (like a
firewall) for all workers and only allow the `federation_sender`s to contact the
outside world.

```yaml
instance_map:
  main:
    host: localhost
    port: 8030
  federation_sender1:
    host: localhost
    port: 8034

outbound_federation_restricted_to:
  - federation_sender1

worker_replication_secret: "secret_secret"
```

#### Background tasks

There is also support for moving background tasks to a separate
148  flake.lock  (generated)
@@ -8,41 +8,20 @@
        "pre-commit-hooks": "pre-commit-hooks"
      },
      "locked": {
        "lastModified": 1683102061,
        "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
        "lastModified": 1688058187,
        "narHash": "sha256-ipDcc7qrucpJ0+0eYNlwnE+ISTcq4m03qW+CWUshRXI=",
        "owner": "cachix",
        "repo": "devenv",
        "rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
        "rev": "c8778e3dc30eb9043e218aaa3861d42d4992de77",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "ref": "main",
        "ref": "v0.6.3",
        "repo": "devenv",
        "type": "github"
      }
    },
    "fenix": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ],
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1682490133,
        "narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
        "owner": "nix-community",
        "repo": "fenix",
        "rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "repo": "fenix",
        "type": "github"
      }
    },
    "flake-compat": {
      "flake": false,
      "locked": {
@@ -60,12 +39,33 @@
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
        "lastModified": 1685518550,
        "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
        "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "flake-utils_2": {
      "inputs": {
        "systems": "systems_2"
      },
      "locked": {
        "lastModified": 1681202837,
        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
        "type": "github"
      },
      "original": {
@@ -170,27 +170,27 @@
    },
    "nixpkgs-stable": {
      "locked": {
        "lastModified": 1673800717,
        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
        "lastModified": 1685801374,
        "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
        "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-22.11",
        "ref": "nixos-23.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1682519441,
        "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
        "lastModified": 1690535733,
        "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
        "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
        "type": "github"
      },
      "original": {
@@ -200,6 +200,22 @@
        "type": "github"
      }
    },
    "nixpkgs_3": {
      "locked": {
        "lastModified": 1681358109,
        "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "pre-commit-hooks": {
      "inputs": {
        "flake-compat": [
@@ -215,11 +231,11 @@
        "nixpkgs-stable": "nixpkgs-stable"
      },
      "locked": {
        "lastModified": 1678376203,
        "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
        "lastModified": 1688056373,
        "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
        "owner": "cachix",
        "repo": "pre-commit-hooks.nix",
        "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
        "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
        "type": "github"
      },
      "original": {
@@ -231,25 +247,27 @@
    "root": {
      "inputs": {
        "devenv": "devenv",
        "fenix": "fenix",
        "nixpkgs": "nixpkgs_2",
        "systems": "systems"
        "rust-overlay": "rust-overlay",
        "systems": "systems_3"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
    "rust-overlay": {
      "inputs": {
        "flake-utils": "flake-utils_2",
        "nixpkgs": "nixpkgs_3"
      },
      "locked": {
        "lastModified": 1682426789,
        "narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
        "lastModified": 1690510705,
        "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "type": "github"
      }
    },
@@ -267,6 +285,36 @@
        "repo": "default",
        "type": "github"
      }
    },
    "systems_2": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    },
    "systems_3": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
46  flake.nix
@@ -39,27 +39,27 @@

{
  inputs = {
    # Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
    # does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
    # Use the master/unstable branch of nixpkgs. Used to fetch the latest
    # available versions of packages.
    nixpkgs.url = "github:NixOS/nixpkgs/master";
    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
    systems.url = "github:nix-systems/default";
    # A development environment manager built on Nix. See https://devenv.sh.
    devenv.url = "github:cachix/devenv/main";
    # Rust toolchains and rust-analyzer nightly.
    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    devenv.url = "github:cachix/devenv/v0.6.3";
    # Rust toolchain.
    rust-overlay.url = "github:oxalica/rust-overlay";
  };

  outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
  outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
    let
      forEachSystem = nixpkgs.lib.genAttrs (import systems);
    in {
      devShells = forEachSystem (system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
          overlays = [ (import rust-overlay) ];
          pkgs = import nixpkgs {
            inherit system overlays;
          };
        in {
          # Everything is configured via devenv - a Nix module for creating declarative
          # developer environments. See https://devenv.sh/reference/options/ for a list
@@ -76,6 +76,23 @@
          # Configure packages to install.
          # Search for package names at https://search.nixos.org/packages?channel=unstable
          packages = with pkgs; [
            # The rust toolchain and related tools.
            # This will install the "default" profile of rust components.
            # https://rust-lang.github.io/rustup/concepts/profiles.html
            #
            # NOTE: We currently need to set the Rust version unnecessarily high
            # in order to work around https://github.com/matrix-org/synapse/issues/15939
            (rust-bin.stable."1.70.0".default.override {
              # Additionally install the "rust-src" extension to allow diving into the
              # Rust source code in an IDE (rust-analyzer will also make use of it).
              extensions = [ "rust-src" ];
            })
            # The rust-analyzer language server implementation.
            rust-analyzer

            # For building any Python bindings to C or Rust code.
            gcc

            # Native dependencies for running Synapse.
            icu
            libffi
@@ -108,7 +125,7 @@
          languages.python.poetry.activate.enable = true;
          # Install all extra Python dependencies; this is needed to run the unit
          # tests and utilise all Synapse features.
          languages.python.poetry.install.arguments = ["--extras all"];
          languages.python.poetry.install.arguments = ["-v" "--extras all"];
          # Install the 'matrix-synapse' package from the local checkout.
          languages.python.poetry.install.installRootPackage = true;

@@ -124,12 +141,11 @@
          # Install dependencies for the additional programming languages
          # involved with Synapse development.
          #
          # * Rust is used for developing and running Synapse.
          # * Golang is needed to run the Complement test suite.
          # * Perl is needed to run the SyTest test suite.
          # * Rust is used for developing and running Synapse.
          #   It is installed manually with `packages` above.
          languages.go.enable = true;
          languages.rust.enable = true;
          languages.rust.version = "stable";
          languages.perl.enable = true;

          # Postgres is needed to run Synapse with postgres support and
@@ -178,7 +194,7 @@
            EOF
          '';
          # Start synapse when `devenv up` is run.
          processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d";
          processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";

          # Define the perl modules we require to run SyTest.
          #
905  poetry.lock  (generated)
File diff suppressed because it is too large
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.87.0"
version = "1.89.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.7.1"
python = "^3.8.0"

# Mandatory Dependencies
# ----------------------
@@ -203,9 +203,6 @@ ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic available on common distros.
# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
pydantic = "^1.7.4"
@@ -312,7 +309,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.275"
ruff = "0.0.277"

# Typechecking
lxml-stubs = ">=0.4.0"
@@ -376,7 +373,15 @@ build-backend = "poetry.core.masonry.api"

[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
#  - CPython 3.6 and 3.7: EOLed
#  - PyPy 3.7: we only support Python 3.8+
#  - musllinux i686: excluded to reduce number of wheels we build.
#    c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
#  - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
#    c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
@@ -13,6 +13,9 @@
// limitations under the License.

#![feature(test)]

use std::borrow::Cow;

use synapse::push::{
    evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
    PushRules, SimpleJsonValue,
@@ -26,15 +29,15 @@ fn bench_match_exact(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -71,15 +74,15 @@ fn bench_match_word(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -116,15 +119,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -161,15 +164,15 @@ fn bench_eval_message(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -63,22 +63,6 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
}];

pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
    // We don't want to notify on edits. Not only can this be confusing in real
    // time (2 notifications, one message) but it's especially confusing
    // if a bridge needs to edit a previously backfilled message.
    PushRule {
        rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
                pattern: Cow::Borrowed("m.replace"),
            },
        ))]),
        actions: Cow::Borrowed(&[]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
        priority_class: 5,
@@ -146,7 +130,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(
            KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
                key: Cow::Borrowed("content.m\\.mentions.user_ids"),
                key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
                value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
            }),
        )]),
@@ -167,8 +151,8 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        priority_class: 5,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
                key: Cow::Borrowed("content.m\\.mentions.room"),
                value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
                key: Cow::Borrowed(r"content.m\.mentions.room"),
                value: Cow::Owned(SimpleJsonValue::Bool(true)),
            })),
            Condition::Known(KnownCondition::SenderNotificationPermission {
                key: Cow::Borrowed("room"),
@@ -241,6 +225,21 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    // We don't want to notify on edits *unless* the edit directly mentions a
    // user, which is handled above.
    PushRule {
        rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
            EventPropertyIsCondition {
                key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
                value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
            },
        ))]),
        actions: Cow::Borrowed(&[]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
        priority_class: 5,
@@ -117,7 +117,7 @@ impl PushRuleEvaluator {
        msc3931_enabled: bool,
    ) -> Result<Self, Error> {
        let body = match flattened_keys.get("content.body") {
            Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
            Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
            _ => String::new(),
        };

@@ -313,13 +313,15 @@ impl PushRuleEvaluator {
                };

                let pattern = match &*exact_event_match.value_type {
                    EventMatchPatternType::UserId => user_id,
                    EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
                    EventMatchPatternType::UserId => user_id.to_owned(),
                    EventMatchPatternType::UserLocalpart => {
                        get_localpart_from_id(user_id)?.to_owned()
                    }
                };

                self.match_event_property_contains(
                    exact_event_match.key.clone(),
                    Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
                    Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
                )?
            }
            KnownCondition::ContainsDisplayName => {
@@ -494,7 +496,7 @@ fn push_rule_evaluator() {
    let mut flattened_keys = BTreeMap::new();
    flattened_keys.insert(
        "content.body".to_string(),
        JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
        JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
    );
    let evaluator = PushRuleEvaluator::py_new(
        flattened_keys,
@@ -522,7 +524,7 @@ fn test_requires_room_version_supports_condition() {
    let mut flattened_keys = BTreeMap::new();
    flattened_keys.insert(
        "content.body".to_string(),
        JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
        JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
    );
    let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
    let evaluator = PushRuleEvaluator::py_new(
@@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum SimpleJsonValue {
    Str(String),
    Str(Cow<'static, str>),
    Int(i64),
    Bool(bool),
    Null,
@@ -265,7 +265,7 @@ pub enum SimpleJsonValue {
impl<'source> FromPyObject<'source> for SimpleJsonValue {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
            Ok(SimpleJsonValue::Str(s.to_string()))
            Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
        // A bool *is* an int, ensure we try bool first.
        } else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
            Ok(SimpleJsonValue::Bool(b.extract()?))
@@ -585,7 +585,7 @@ impl FilteredPushRules {
        }

        if !self.msc3958_suppress_edits_enabled
            && rule.rule_id == "global/override/.com.beeper.suppress_edits"
            && rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits"
        {
            return false;
        }
@@ -22,15 +22,19 @@ from typing import Collection, Optional, Sequence, Set

# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
#
# If an EOL is forced by a Python version and we're dropping support for it, make sure
# to remove references to the distribution across Synapse (search for "bullseye" for
# example)
DISTS = (
    "debian:buster",  # oldstable: EOL 2022-08
    "debian:bullseye",
    "debian:bookworm",
    "debian:sid",
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
    "ubuntu:lunar",  # 23.04 (EOL 2024-01)
    "debian:bullseye",  # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
    "debian:bookworm",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
    "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
    "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
    "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
    "debian:trixie",  # (EOL not specified yet)
)

DESC = """\
@@ -214,7 +214,7 @@ fi

extra_test_args=()

test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -253,6 +253,10 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
  export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi

if [[ -n "$UNIX_SOCKETS" ]]; then
  # Enable full on Unix socket mode for Synapse, Redis and Postgresql
  export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi

if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
  # Set the log level to what is desired
@@ -25,8 +25,8 @@ from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
    print("Synapse requires Python 3.7 or above.")
if sys.version_info < (3, 8):
    print("Synapse requires Python 3.8 or above.")
    sys.exit(1)

# Allow using the asyncio reactor via env var.
@@ -61,6 +61,7 @@ from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpda
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
    EventsBackgroundUpdatesStore,
@@ -196,6 +197,11 @@ IGNORED_TABLES = {
    "ui_auth_sessions",
    "ui_auth_sessions_credentials",
    "ui_auth_sessions_ips",
    # Ignore the worker locks table, as a) there shouldn't be any acquired locks
    # after porting, and b) the circular foreign key constraints make it hard to
    # port.
    "worker_read_write_locks_mode",
    "worker_read_write_locks",
}


@@ -239,6 +245,7 @@ class Store(
    PresenceBackgroundUpdateStore,
    ReceiptsBackgroundUpdateStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
):
    def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -754,7 +761,7 @@ class Porter:

        # Step 2. Set up sequences
        #
        # We do this before porting the tables so that event if we fail half
        # We do this before porting the tables so that even if we fail half
        # way through the postgres DB always have sequences that are greater
        # than their respective tables. If we don't then creating the
        # `DataStore` object will fail due to the inconsistency.
@@ -762,6 +769,10 @@ class Porter:
        await self._setup_state_group_id_seq()
        await self._setup_user_id_seq()
        await self._setup_events_stream_seqs()
        await self._setup_sequence(
            "un_partial_stated_event_stream_sequence",
            ("un_partial_stated_event_stream",),
        )
        await self._setup_sequence(
            "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
        )
@@ -772,6 +783,11 @@ class Porter:
        await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
        await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
        await self._setup_auth_chain_sequence()
        await self._setup_sequence(
            "application_services_txn_id_seq",
            ("application_services_txns",),
            "txn_id",
        )

        # Step 3. Get tables.
        self.progress.set_state("Fetching tables")
@@ -803,7 +819,9 @@ class Porter:
        )
        # Map from table name to args passed to `handle_table`, i.e. a tuple
        # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
        tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
        tables_to_port_info_map = {
            r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
        }

        # Step 5. Do the copying.
        #
@@ -1074,7 +1092,10 @@ class Porter:
        )

    async def _setup_sequence(
        self, sequence_name: str, stream_id_tables: Iterable[str]
        self,
        sequence_name: str,
        stream_id_tables: Iterable[str],
        column_name: str = "stream_id",
    ) -> None:
        """Set a sequence to the correct value."""
        current_stream_ids = []
@@ -1084,7 +1105,7 @@ class Porter:
                await self.sqlite_store.db_pool.simple_select_one_onecol(
                    table=stream_id_table,
                    keyvalues={},
                    retcol="COALESCE(MAX(stream_id), 1)",
                    retcol=f"COALESCE(MAX({column_name}), 1)",
                    allow_none=True,
                ),
            )
@@ -217,6 +217,13 @@ class InvalidAPICallError(SynapseError):
        super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)


class InvalidProxyCredentialsError(SynapseError):
    """Error raised when the proxy credentials are invalid."""

    def __init__(self, msg: str, errcode: str = Codes.UNKNOWN):
        super().__init__(401, msg, errcode)


class ProxiedRequestError(SynapseError):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
@@ -78,36 +78,29 @@ class RoomVersion:
    # MSC2209: Check 'notifications' key while verifying
    # m.room.power_levels auth rules.
    limit_notifications_power_levels: bool
    # MSC2175: No longer include the creator in m.room.create events.
    msc2175_implicit_room_creator: bool
    # MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
    # content property.
    msc2176_redaction_rules: bool
    # MSC3083: Support the 'restricted' join_rule.
    msc3083_join_rules: bool
    # MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
    # be enabled if MSC3083 is not.
    msc3375_redaction_rules: bool
    # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
    # m.room.membership event with membership 'knock'.
    msc2403_knocking: bool
    # No longer include the creator in m.room.create events.
    implicit_room_creator: bool
    # Apply updated redaction rules algorithm from room version 11.
    updated_redaction_rules: bool
    # Support the 'restricted' join rule.
    restricted_join_rule: bool
    # Support for the proper redaction rules for the restricted join rule. This requires
    # restricted_join_rule to be enabled.
    restricted_join_rule_fix: bool
    # Support the 'knock' join rule.
    knock_join_rule: bool
    # MSC3389: Protect relation information from redaction.
    msc3389_relation_redactions: bool
    # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
    # knocks and restricted join rules into the same join condition.
    msc3787_knock_restricted_join_rule: bool
    # MSC3667: Enforce integer power levels
    msc3667_int_only_power_levels: bool
    # MSC3821: Do not redact the third_party_invite content field for membership events.
    msc3821_redaction_rules: bool
    # Support the 'knock_restricted' join rule.
    knock_restricted_join_rule: bool
    # Enforce integer power levels
    enforce_int_power_levels: bool
    # MSC3931: Adds a push rule condition for "room version feature flags", making
    # some push rules room version dependent. Note that adding a flag to this list
    # is not enough to mark it "supported": the push rule evaluator also needs to
    # support the flag. Unknown flags are ignored by the evaluator, making conditions
    # fail if used.
    msc3931_push_features: Tuple[str, ...]  # values from PushRuleRoomFlag
    # MSC3989: Redact the origin field.
    msc3989_redaction_rules: bool


class RoomVersions:
@@ -120,17 +113,15 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V2 = RoomVersion(
        "2",
@@ -141,17 +132,15 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V3 = RoomVersion(
        "3",
@@ -162,17 +151,15 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V4 = RoomVersion(
        "4",
@@ -183,17 +170,15 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V5 = RoomVersion(
        "5",
@@ -204,17 +189,15 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V6 = RoomVersion(
        "6",
@@ -225,38 +208,15 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=True,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V7 = RoomVersion(
        "7",
@@ -267,17 +227,15 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V8 = RoomVersion(
        "8",
@@ -288,17 +246,15 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=True,
        restricted_join_rule_fix=False,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V9 = RoomVersion(
|
||||
"9",
|
||||
@@ -309,59 +265,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC3787 = RoomVersion(
|
||||
"org.matrix.msc3787",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC3821 = RoomVersion(
|
||||
"org.matrix.msc3821.opt1",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=True,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V10 = RoomVersion(
|
||||
"10",
|
||||
@@ -372,17 +284,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=True,
|
||||
enforce_int_power_levels=True,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC1767v10 = RoomVersion(
|
||||
# MSC1767 (Extensible Events) based on room version "10"
|
||||
@@ -394,60 +304,34 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=True,
|
||||
enforce_int_power_levels=True,
|
||||
msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC3989 = RoomVersion(
|
||||
"org.matrix.msc3989",
|
||||
RoomDisposition.UNSTABLE,
|
||||
V11 = RoomVersion(
|
||||
"11",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=True, # Used by MSC3820
|
||||
updated_redaction_rules=True, # Used by MSC3820
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=True,
|
||||
enforce_int_power_levels=True,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=True,
|
||||
)
|
||||
MSC3820opt2 = RoomVersion(
|
||||
# Based upon v10
|
||||
"org.matrix.msc3820.opt2",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=True, # Used by MSC3820
|
||||
msc2176_redaction_rules=True, # Used by MSC3820
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=True, # Used by MSC3820
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=True, # Used by MSC3820
|
||||
)
|
||||
|
||||
|
||||
@@ -460,14 +344,11 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
|
||||
RoomVersions.V4,
|
||||
RoomVersions.V5,
|
||||
RoomVersions.V6,
|
||||
RoomVersions.MSC2176,
|
||||
RoomVersions.V7,
|
||||
RoomVersions.V8,
|
||||
RoomVersions.V9,
|
||||
RoomVersions.MSC3787,
|
||||
RoomVersions.V10,
|
||||
RoomVersions.MSC3989,
|
||||
RoomVersions.MSC3820opt2,
|
||||
RoomVersions.V11,
|
||||
)
|
||||
}
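The hunks above rename the experimental msc*-prefixed RoomVersion flags to stable names (msc2403_knocking becomes knock_join_rule, msc3083_join_rules becomes restricted_join_rule, and so on) and fold the MSC2176/MSC3989 redaction flags into V11's updated_redaction_rules. A minimal sketch of how a caller checks one of the renamed capabilities against the KNOWN_ROOM_VERSIONS map above, assuming the usual synapse.api.room_versions module path (the helper name is ours, not part of the diff):

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

def room_supports_knocking(room_version_id: str) -> bool:
    # Unknown or unsupported room versions count as "no knocking".
    version = KNOWN_ROOM_VERSIONS.get(room_version_id)
    return version is not None and version.knock_join_rule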

@@ -496,12 +377,12 @@ MSC3244_CAPABILITIES = {
RoomVersionCapability(
"knock",
RoomVersions.V7,
lambda room_version: room_version.msc2403_knocking,
lambda room_version: room_version.knock_join_rule,
),
RoomVersionCapability(
"restricted",
RoomVersions.V9,
lambda room_version: room_version.msc3083_join_rules,
lambda room_version: room_version.restricted_join_rule,
),
)
}
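The MSC3244 capability entries above now point their check lambdas at the renamed flags. As a rough illustration of how such an entry can be evaluated to list the room versions supporting a capability (the helper below is a sketch, not Synapse code):

def versions_supporting(check) -> list:
    # `check` is one of the lambdas above, e.g.
    # lambda room_version: room_version.knock_join_rule
    return [v.identifier for v in KNOWN_ROOM_VERSIONS.values() if check(v)]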

@@ -386,6 +386,7 @@ def listen_unix(


def listen_http(
hs: "HomeServer",
listener_config: ListenerConfig,
root_resource: Resource,
version_string: str,
@@ -406,6 +407,7 @@ def listen_http(
version_string,
max_request_body_size=max_request_body_size,
reactor=reactor,
hs=hs,
)

if isinstance(listener_config, TCPListenerConfig):

@@ -221,6 +221,7 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())

_base.listen_http(
self,
listener_config,
root_resource,
self.version_string,

@@ -139,6 +139,7 @@ class SynapseHomeServer(HomeServer):
root_resource = OptionsResource()

ports = listen_http(
self,
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,

@@ -16,9 +16,6 @@ import logging
import urllib.parse
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
@@ -27,10 +24,11 @@ from typing import (
Sequence,
Tuple,
TypeVar,
Union,
)

from prometheus_client import Counter
from typing_extensions import Concatenate, ParamSpec, TypeGuard
from typing_extensions import ParamSpec, TypeGuard

from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException, HttpResponseException
@@ -80,9 +78,7 @@ sent_todevice_counter = Counter(

HOUR_IN_MS = 60 * 60 * 1000


APP_SERVICE_PREFIX = "/_matrix/app/v1"
APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"

P = ParamSpec("P")
R = TypeVar("R")
@@ -123,52 +119,12 @@ class ApplicationServiceApi(SimpleHttpClient):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.clock = hs.get_clock()
self.config = hs.config.appservice

self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
)

async def _send_with_fallbacks(
self,
service: "ApplicationService",
prefixes: List[str],
path: str,
func: Callable[Concatenate[str, P], Awaitable[R]],
*args: P.args,
**kwargs: P.kwargs,
) -> R:
"""
Attempt to call an application service with multiple paths, falling back
until one succeeds.

Args:
service: The application service; this provides the base URL.
prefixes: A list of paths to try in order for the requests.
path: A suffix to append to each prefix.
func: The function to call, the first argument will be the full
endpoint to fetch. Other arguments are provided by args/kwargs.

Returns:
The return value of func.
"""
for i, prefix in enumerate(prefixes, start=1):
uri = f"{service.url}{prefix}{path}"
try:
return await func(uri, *args, **kwargs)
except HttpResponseException as e:
# If an error is received that is due to an unrecognised path,
# fallback to next path (if one exists). Otherwise, consider it
# a legitimate error and raise.
if i < len(prefixes) and is_unknown_endpoint(e):
continue
raise
except Exception:
# Unexpected exceptions get sent to the caller.
raise

# The function should always exit via the return or raise above this.
raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")

async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
if service.url is None:
return False
@@ -177,12 +133,12 @@ class ApplicationServiceApi(SimpleHttpClient):
assert service.hs_token is not None

try:
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/users/{urllib.parse.quote(user_id)}",
self.get_json,
{"access_token": service.hs_token},
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
response = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
@@ -203,12 +159,12 @@ class ApplicationServiceApi(SimpleHttpClient):
assert service.hs_token is not None

try:
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/rooms/{urllib.parse.quote(alias)}",
self.get_json,
{"access_token": service.hs_token},
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
response = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
@@ -241,15 +197,14 @@ class ApplicationServiceApi(SimpleHttpClient):
assert service.hs_token is not None

try:
args: Mapping[Any, Any] = {
**fields,
b"access_token": service.hs_token,
}
response = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
self.get_json,
args: Mapping[bytes, Union[List[bytes], str]] = fields
if self.config.use_appservice_legacy_authorization:
args = {
**fields,
b"access_token": service.hs_token,
}
response = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
args=args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
@@ -285,12 +240,12 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
try:
info = await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
self.get_json,
{"access_token": service.hs_token},
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}
info = await self.get_json(
f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)

@@ -401,13 +356,14 @@ class ApplicationServiceApi(SimpleHttpClient):
}

try:
await self._send_with_fallbacks(
service,
[APP_SERVICE_PREFIX, ""],
f"/transactions/{urllib.parse.quote(str(txn_id))}",
self.put_json,
args = None
if self.config.use_appservice_legacy_authorization:
args = {"access_token": service.hs_token}

await self.put_json(
f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
json_body=body,
args={"access_token": service.hs_token},
args=args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):

@@ -43,6 +43,14 @@ class AppServiceConfig(Config):
)

self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
self.use_appservice_legacy_authorization = config.get(
"use_appservice_legacy_authorization", False
)
if self.use_appservice_legacy_authorization:
logger.warning(
"The use of appservice legacy authorization via query params is deprecated"
" and should be considered insecure."
)
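The appservice client now always authenticates with an Authorization header and only sends the deprecated ?access_token= query parameter when use_appservice_legacy_authorization is set, as the config hunk above shows. A standalone sketch of the resulting request shape (names other than the config key and URL prefix are hypothetical):

import urllib.parse

def build_user_query(service_url: str, user_id: str, hs_token: str, legacy: bool):
    # The header is always sent; the query parameter only in legacy mode.
    uri = f"{service_url}/_matrix/app/v1/users/{urllib.parse.quote(user_id)}"
    headers = {"Authorization": [f"Bearer {hs_token}"]}
    args = {"access_token": hs_token} if legacy else None
    return uri, args, headers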


def load_appservices(

@@ -31,7 +31,7 @@ class AuthConfig(Config):

# The default value of password_config.enabled is True, unless msc3861 is enabled.
msc3861_enabled = (
config.get("experimental_features", {})
(config.get("experimental_features") or {})
.get("msc3861", {})
.get("enabled", False)
)

@@ -216,12 +216,6 @@ class MSC3861:
("session_lifetime",),
)

if not root.experimental.msc3970_enabled:
raise ConfigError(
"experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled",
("experimental_features", "msc3970_enabled"),
)


@attr.s(auto_attribs=True, frozen=True, slots=True)
class MSC3866Config:
@@ -247,6 +241,27 @@ class ExperimentalConfig(Config):
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)

# MSC2697 (device dehydration)
# Enabled by default since this option was added after adding the feature.
# It is not recommended that MSC2697 and MSC3814 both be enabled at
# once.
self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True)

# MSC3814 (dehydrated devices with SSSS)
# This is an alternative method to achieve the same goals as MSC2697.
# It is not recommended that MSC2697 and MSC3814 both be enabled at
# once.
self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)

if self.msc2697_enabled and self.msc3814_enabled:
raise ConfigError(
"MSC2697 and MSC3814 should not both be enabled.",
(
"experimental_features",
"msc3814_enabled",
),
)

# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)

@@ -376,15 +391,9 @@ class ExperimentalConfig(Config):
"Invalid MSC3861 configuration", ("experimental", "msc3861")
) from exc

# MSC3970: Scope transaction IDs to devices
self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled)

# Check that none of the other config options conflict with MSC3861 when enabled
self.msc3861.check_config_conflicts(self.root)

# MSC4009: E.164 Matrix IDs
self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)

# MSC4010: Do not allow setting m.push_rules account data.
self.msc4010_push_rules_account_data = experimental.get(
"msc4010_push_rules_account_data", False

@@ -65,5 +65,23 @@ class FederationConfig(Config):
self.max_long_retries = federation_config.get("max_long_retries", 10)
self.max_short_retries = federation_config.get("max_short_retries", 3)

# Allow for the configuration of the backoff algorithm used
# when trying to reach an unavailable destination.
# Unlike previous configuration, these values apply across
# multiple requests, and the state of the backoff is stored in the database.
self.destination_min_retry_interval_ms = Config.parse_duration(
federation_config.get("destination_min_retry_interval", "10m")
)
self.destination_retry_multiplier = federation_config.get(
"destination_retry_multiplier", 2
)
self.destination_max_retry_interval_ms = min(
Config.parse_duration(
federation_config.get("destination_max_retry_interval", "7d")
),
# Set a hard-limit to not overflow the database column.
2**62,
)
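These settings drive a persisted exponential backoff: retries start at destination_min_retry_interval, grow by destination_retry_multiplier per failure, and are capped at destination_max_retry_interval (and, as a hard limit, at 2**62 ms). A sketch of the interval progression, for illustration only (the real loop lives in Synapse's retry limiter):

def next_retry_interval_ms(previous_ms: int, min_ms: int, multiplier: int, max_ms: int) -> int:
    # The first failure starts at the configured minimum.
    if previous_ms == 0:
        return min_ms
    # Subsequent failures back off geometrically, up to the cap.
    return min(previous_ms * multiplier, max_ms)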


_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}

@@ -15,7 +15,7 @@

import argparse
import logging
from typing import Any, Dict, List, Union
from typing import Any, Dict, List, Optional, Union

import attr
from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr
@@ -41,11 +41,17 @@ Synapse version. Please use ``%s: name_of_worker`` instead.

_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration, or optionally (as a deprecated
solution) in every worker's yaml as various `worker_replication_*` settings as defined
in workers documentation here:
`instance_map` declared in your shared yaml configuration as defined in configuration
documentation here:
`https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#instance_map`
"""

WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE = """
'%s' is no longer a supported worker setting, please place '%s' onto your shared
configuration under `main` inside the `instance_map`. See workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""

# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
@@ -88,7 +94,7 @@ class ConfigModel(BaseModel):
allow_mutation = False


class InstanceLocationConfig(ConfigModel):
class InstanceTcpLocationConfig(ConfigModel):
"""The host and port to talk to an instance via HTTP replication."""

host: StrictStr
@@ -104,6 +110,23 @@ class InstanceLocationConfig(ConfigModel):
return f"{self.host}:{self.port}"


class InstanceUnixLocationConfig(ConfigModel):
"""The socket file to talk to an instance via HTTP replication."""

path: StrictStr

def scheme(self) -> str:
"""Hardcode a retrievable scheme"""
return "unix"

def netloc(self) -> str:
"""Nicely format the address location data"""
return f"{self.path}"


InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig]
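With InstanceLocationConfig now a union of the TCP and Unix-socket variants, callers that previously assumed host/port have to branch on the concrete type. A sketch of such a dispatch, assuming both classes expose the scheme()/netloc() methods shown and implied above (the helper and example values are ours):

def replication_target(loc: InstanceLocationConfig) -> str:
    if isinstance(loc, InstanceUnixLocationConfig):
        return f"unix:{loc.netloc()}"  # e.g. unix:/run/synapse/main.sock
    # TCP variant: scheme depends on the tls setting.
    return f"{loc.scheme()}://{loc.netloc()}"  # e.g. https://worker1:9093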


@attr.s
class WriterLocations:
"""Specifies the instances that write various streams.
@@ -148,6 +171,27 @@ class WriterLocations:
)


@attr.s(auto_attribs=True)
class OutboundFederationRestrictedTo:
"""Whether we limit outbound federation to a certain set of instances.

Attributes:
instances: optional list of instances that can make outbound federation
requests. If None then all instances can make federation requests.
locations: list of instance locations to connect to proxy via.
"""

instances: Optional[List[str]]
locations: List[InstanceLocationConfig] = attr.Factory(list)

def __contains__(self, instance: str) -> bool:
# It feels a bit dirty to return `True` if `instances` is `None`, but it makes
# sense in downstream usage in the sense that if
# `outbound_federation_restricted_to` is not configured, then any instance can
# talk to federation (no restrictions so always return `True`).
return self.instances is None or instance in self.instances
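Usage sketch for the membership test above: with no instances configured, every worker may send federation traffic; otherwise only the listed ones may (instance names here are examples).

restricted = OutboundFederationRestrictedTo(instances=None)
assert "federation_sender1" in restricted  # unrestricted

restricted = OutboundFederationRestrictedTo(instances=["federation_sender1"])
assert "federation_sender1" in restricted
assert "other_worker" not in restricted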


class WorkerConfig(Config):
"""The workers are processes run separately to the main synapse process.
They have their own pid_file and listener configuration. They use the
@@ -216,22 +260,37 @@ class WorkerConfig(Config):
)

# A map from instance name to host/port of their HTTP replication endpoint.
# Check if the main process is declared. Inject it into the map if it's not,
# based first on if a 'main' block is declared then on 'worker_replication_*'
# data. If both are available, default to instance_map. The main process
# itself doesn't need this data as it would never have to talk to itself.
# Check if the main process is declared. The main process itself doesn't need
# this data as it would never have to talk to itself.
instance_map: Dict[str, Any] = config.get("instance_map", {})

if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
# TODO: The next 3 condition blocks can be deleted after some time has
# passed and we're ready to stop checking for these settings.
# The host used to connect to the main synapse
main_host = config.get("worker_replication_host", None)
if main_host:
raise ConfigError(
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
% ("worker_replication_host", main_host)
)

# The port on the main synapse for HTTP replication endpoint
main_port = config.get("worker_replication_http_port")
if main_port:
raise ConfigError(
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
% ("worker_replication_http_port", main_port)
)

# The tls mode on the main synapse for HTTP replication endpoint.
# For backward compatibility this defaults to False.
main_tls = config.get("worker_replication_http_tls", False)
if main_tls:
raise ConfigError(
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
% ("worker_replication_http_tls", main_tls)
)

# For now, accept 'main' in the instance_map, but the replication system
# expects 'master', force that into being until it's changed later.
@@ -241,30 +300,20 @@ class WorkerConfig(Config):
]
del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]

# This is the backwards compatibility bit that handles the
# worker_replication_* bits using setdefault() to not overwrite anything.
elif main_host is not None and main_port is not None:
instance_map.setdefault(
MAIN_PROCESS_INSTANCE_NAME,
{
"host": main_host,
"port": main_port,
"tls": main_tls,
},
)

else:
# If we've gotten here, it means that the main process is not on the
# instance_map and that not enough worker_replication_* variables
# were declared in the worker's yaml.
# instance_map.
raise ConfigError(
_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
% MAIN_PROCESS_INSTANCE_MAP_NAME
)

# type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
self.instance_map: Dict[
str, InstanceLocationConfig
] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)
] = parse_and_validate_mapping(
instance_map, InstanceLocationConfig  # type: ignore[arg-type]
)

# Map from type of streams to source, c.f. WriterLocations.
writers = config.get("stream_writers") or {}
@@ -357,6 +406,28 @@ class WorkerConfig(Config):
new_option_name="update_user_directory_from_worker",
)

outbound_federation_restricted_to = config.get(
"outbound_federation_restricted_to", None
)
self.outbound_federation_restricted_to = OutboundFederationRestrictedTo(
outbound_federation_restricted_to
)
if outbound_federation_restricted_to:
if not self.worker_replication_secret:
raise ConfigError(
"`worker_replication_secret` must be configured when using `outbound_federation_restricted_to`."
)

for instance in outbound_federation_restricted_to:
if instance not in self.instance_map:
raise ConfigError(
"Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config."
% (instance,)
)
self.outbound_federation_restricted_to.locations.append(
self.instance_map[instance]
)

def _should_this_worker_perform_duty(
self,
config: Dict[str, Any],

@@ -126,7 +126,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
raise AuthError(403, "Event not signed by sending server")

is_invite_via_allow_rule = (
event.room_version.msc3083_join_rules
event.room_version.restricted_join_rule
and event.type == EventTypes.Member
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
@@ -352,11 +352,9 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.MSC1767v10,
}
@@ -449,7 +447,7 @@ def _check_create(event: "EventBase") -> None:

# 1.4 If content has no creator field, reject if the room version requires it.
if (
not event.room_version.msc2175_implicit_room_creator
not event.room_version.implicit_room_creator
and EventContentFields.ROOM_CREATOR not in event.content
):
raise AuthError(403, "Create event lacks a 'creator' property")
@@ -486,7 +484,7 @@ def _is_membership_change_allowed(
key = (EventTypes.Create, "")
create = auth_events.get(key)
if create and event.prev_event_ids()[0] == create.event_id:
if room_version.msc2175_implicit_room_creator:
if room_version.implicit_room_creator:
creator = create.sender
else:
creator = create.content[EventContentFields.ROOM_CREATOR]
@@ -509,7 +507,7 @@ def _is_membership_change_allowed(
caller_invited = caller and caller.membership == Membership.INVITE
caller_knocked = (
caller
and room_version.msc2403_knocking
and room_version.knock_join_rule
and caller.membership == Membership.KNOCK
)

@@ -609,9 +607,9 @@ def _is_membership_change_allowed(
elif join_rule == JoinRules.PUBLIC:
pass
elif (
room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
room_version.restricted_join_rule and join_rule == JoinRules.RESTRICTED
) or (
room_version.msc3787_knock_restricted_join_rule
room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
):
# This is the same as public, but the event must contain a reference
@@ -641,9 +639,9 @@ def _is_membership_change_allowed(

elif (
join_rule == JoinRules.INVITE
or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
or (
room_version.msc3787_knock_restricted_join_rule
room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
@@ -677,9 +675,9 @@ def _is_membership_change_allowed(
"You don't have permission to ban",
errcode=Codes.INSUFFICIENT_POWER,
)
elif room_version.msc2403_knocking and Membership.KNOCK == membership:
elif room_version.knock_join_rule and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
not room_version.msc3787_knock_restricted_join_rule
not room_version.knock_restricted_join_rule
or join_rule != JoinRules.KNOCK_RESTRICTED
):
raise AuthError(403, "You don't have permission to knock")
@@ -836,7 +834,7 @@ def _check_power_levels(
# Reject events with stringy power levels if required by room version
if (
event.type == EventTypes.PowerLevels
and room_version_obj.msc3667_int_only_power_levels
and room_version_obj.enforce_int_power_levels
):
for k, v in event.content.items():
if k in {
@@ -972,7 +970,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
key = (EventTypes.Create, "")
create_event = auth_events.get(key)
if create_event is not None:
if create_event.room_version.msc2175_implicit_room_creator:
if create_event.room_version.implicit_room_creator:
creator = create_event.sender
else:
creator = create_event.content[EventContentFields.ROOM_CREATOR]
@@ -1110,7 +1108,7 @@ def auth_types_for_event(
)
auth_types.add(key)

if room_version.msc3083_join_rules and membership == Membership.JOIN:
if room_version.restricted_join_rule and membership == Membership.JOIN:
if EventContentFields.AUTHORISING_USER in event.content:
key = (
EventTypes.Member,

@@ -346,7 +346,7 @@ class EventBase(metaclass=abc.ABCMeta):
@property
def redacts(self) -> Optional[str]:
"""MSC2176 moved the redacts field into the content."""
if self.room_version.msc2176_redaction_rules:
if self.room_version.updated_redaction_rules:
return self.content.get("redacts")
return self.get("redacts")


@@ -175,7 +175,7 @@ class EventBuilder:

# MSC2174 moves the redacts property to the content, it is invalid to
# provide it as a top-level property.
if self._redacts is not None and not self.room_version.msc2176_redaction_rules:
if self._redacts is not None and not self.room_version.updated_redaction_rules:
event_dict["redacts"] = self._redacts

if self._origin_server_ts is not None:

@@ -108,13 +108,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"origin_server_ts",
]

# Room versions from before MSC2176 had additional allowed keys.
if not room_version.msc2176_redaction_rules:
allowed_keys.extend(["prev_state", "membership"])

# Room versions before MSC3989 kept the origin field.
if not room_version.msc3989_redaction_rules:
allowed_keys.append("origin")
# Earlier room versions had additional allowed keys.
if not room_version.updated_redaction_rules:
allowed_keys.extend(["prev_state", "membership", "origin"])

event_type = event_dict["type"]
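The consolidation above means a single updated_redaction_rules flag now decides whether prev_state, membership, and origin survive redaction, where two MSC-specific flags used to. A condensed illustration (base_keys stands in for the allowed_keys list ending at "origin_server_ts" above; the helper name is ours):

def allowed_top_level_keys(room_version, base_keys: list) -> list:
    keys = list(base_keys)
    if not room_version.updated_redaction_rules:
        # Older room versions also keep these three fields after redaction.
        keys.extend(["prev_state", "membership", "origin"])
    return keys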

@@ -127,9 +123,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic

if event_type == EventTypes.Member:
add_fields("membership")
if room_version.msc3375_redaction_rules:
if room_version.restricted_join_rule_fix:
add_fields(EventContentFields.AUTHORISING_USER)
if room_version.msc3821_redaction_rules:
if room_version.updated_redaction_rules:
# Preserve the signed field under third_party_invite.
third_party_invite = event_dict["content"].get("third_party_invite")
if isinstance(third_party_invite, collections.abc.Mapping):
@@ -140,14 +136,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
]

elif event_type == EventTypes.Create:
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
return event_dict
if room_version.updated_redaction_rules:
# MSC2176 rules state that create events cannot have their `content` redacted.
new_content = event_dict["content"]
elif not room_version.implicit_room_creator:
# Some room versions give meaning to `creator`
add_fields("creator")

add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
if room_version.msc3083_join_rules:
if room_version.restricted_join_rule:
add_fields("allow")
elif event_type == EventTypes.PowerLevels:
add_fields(
@@ -161,14 +159,14 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"redact",
)

if room_version.msc2176_redaction_rules:
if room_version.updated_redaction_rules:
add_fields("invite")

elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
elif event_type == EventTypes.Redaction and room_version.updated_redaction_rules:
add_fields("redacts")

# Protect the rel_type and event_id fields under the m.relates_to field.
@@ -396,7 +394,6 @@ def serialize_event(
time_now_ms: int,
*,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
msc3970_enabled: bool = False,
) -> JsonDict:
"""Serialize event for clients

@@ -404,8 +401,6 @@ def serialize_event(
e
time_now_ms
config: Event serialization config
msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should
include the `transaction_id` in the event's `unsigned` section.

Returns:
The serialized event dictionary.
@@ -431,38 +426,46 @@ def serialize_event(
e.unsigned["redacted_because"],
time_now_ms,
config=config,
msc3970_enabled=msc3970_enabled,
)

# If we have a txn_id saved in the internal_metadata, we should include it in the
# unsigned section of the event if it was sent by the same session as the one
# requesting the event.
txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None and config.requester is not None:
# For the MSC3970 rules to be applied, we *need* to have the device ID in the
# event internal metadata. Since we were not recording them before, if it hasn't
# been recorded, we fallback to the old behaviour.
if (
txn_id is not None
and config.requester is not None
and config.requester.user.to_string() == e.sender
):
# Some events do not have the device ID stored in the internal metadata,
# this includes old events as well as those created by appservice, guests,
# or with tokens minted with the admin API. For those events, fallback
# to using the access token instead.
event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
if msc3970_enabled and event_device_id is not None:
if event_device_id is not None:
if event_device_id == config.requester.device_id:
d["unsigned"]["transaction_id"] = txn_id

else:
# The pre-MSC3970 behaviour is to only include the transaction ID if the
# event was sent from the same access token. For regular users, we can use
# the access token ID to determine this. For guests, we can't, but since
# each guest only has one access token, we can just check that the event was
# sent by the same user as the one requesting the event.
# Fallback behaviour: only include the transaction ID if the event
# was sent from the same access token.
#
# For regular users, the access token ID can be used to determine this.
# This includes access tokens minted with the admin API.
#
# For guests and appservice users, we can't check the access token ID
# so assume it is the same session.
event_token_id: Optional[int] = getattr(
e.internal_metadata, "token_id", None
)
if config.requester.user.to_string() == e.sender and (
if (
(
event_token_id is not None
and config.requester.access_token_id is not None
and event_token_id == config.requester.access_token_id
)
or config.requester.is_guest
or config.requester.app_service
):
d["unsigned"]["transaction_id"] = txn_id

@@ -477,6 +480,17 @@ def serialize_event(
if config.as_client_event:
d = config.event_format(d)

# If the event is a redaction, the field with the redacted event ID appears
# in a different location depending on the room version. e.redacts handles
# fetching from the proper location; copy it to the other location for forwards-
# and backwards-compatibility with clients.
if e.type == EventTypes.Redaction and e.redacts is not None:
if e.room_version.updated_redaction_rules:
d["redacts"] = e.redacts
else:
d["content"] = dict(d["content"])
d["content"]["redacts"] = e.redacts

only_event_fields = config.only_event_fields
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
@@ -495,9 +509,6 @@ class EventClientSerializer:
clients.
"""

def __init__(self, *, msc3970_enabled: bool = False):
self._msc3970_enabled = msc3970_enabled

def serialize_event(
self,
event: Union[JsonDict, EventBase],
@@ -522,9 +533,7 @@ class EventClientSerializer:
if not isinstance(event, EventBase):
return event

serialized_event = serialize_event(
event, time_now, config=config, msc3970_enabled=self._msc3970_enabled
)
serialized_event = serialize_event(event, time_now, config=config)

# Check if there are any bundled aggregations to include with the event.
if bundle_aggregations:

@@ -231,7 +231,7 @@ async def _check_sigs_on_pdu(
# If this is a join event for a restricted room it may have been authorised
# via a different server from the sending server. Check those signatures.
if (
room_version.msc3083_join_rules
room_version.restricted_join_rule
and pdu.type == EventTypes.Member
and pdu.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in pdu.content

@@ -983,7 +983,7 @@ class FederationClient(FederationBase):
if not room_version:
raise UnsupportedRoomVersionError()

if not room_version.msc2403_knocking and membership == Membership.KNOCK:
if not room_version.knock_join_rule and membership == Membership.KNOCK:
raise SynapseError(
400,
"This room version does not support knocking",
@@ -1069,7 +1069,7 @@ class FederationClient(FederationBase):
# * Ensure the signatures are good.
#
# Otherwise, fallback to the provided event.
if room_version.msc3083_join_rules and response.event:
if room_version.restricted_join_rule and response.event:
event = response.event

valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
@@ -1195,7 +1195,7 @@ class FederationClient(FederationBase):

# MSC3083 defines additional error codes for room joins.
failover_errcodes = None
if room_version.msc3083_join_rules:
if room_version.restricted_join_rule:
failover_errcodes = (
Codes.UNABLE_AUTHORISE_JOIN,
Codes.UNABLE_TO_GRANT_JOIN,

@@ -63,6 +63,7 @@ from synapse.federation.federation_base import (
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
@@ -137,6 +138,7 @@ class FederationServer(FederationBase):
self._event_auth_handler = hs.get_event_auth_handler()
self._room_member_handler = hs.get_room_member_handler()
self._e2e_keys_handler = hs.get_e2e_keys_handler()
self._worker_lock_handler = hs.get_worker_locks_handler()

self._state_storage_controller = hs.get_storage_controllers().state

@@ -806,7 +808,7 @@ class FederationServer(FederationBase):
raise IncompatibleRoomVersionError(room_version=room_version.identifier)

# Check that this room supports knocking as defined by its room version
if not room_version.msc2403_knocking:
if not room_version.knock_join_rule:
raise SynapseError(
403,
"This room version does not support knocking",
@@ -909,7 +911,7 @@ class FederationServer(FederationBase):
errcode=Codes.NOT_FOUND,
)

if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
if membership_type == Membership.KNOCK and not room_version.knock_join_rule:
raise SynapseError(
403,
"This room version does not support knocking",
@@ -933,7 +935,7 @@ class FederationServer(FederationBase):
# the event is valid to be sent into the room. Currently this is only done
# if the user is being joined via restricted join rules.
if (
room_version.msc3083_join_rules
room_version.restricted_join_rule
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
):
@@ -1236,9 +1238,18 @@ class FederationServer(FederationBase):
logger.info("handling received PDU in room %s: %s", room_id, event)
try:
with nested_logging_context(event.event_id):
await self._federation_event_handler.on_receive_pdu(
origin, event
)
# We're taking out a lock within a lock, which could
# lead to deadlocks if we're not careful. However, it is
# safe on this occasion as we only ever take a write
# lock when deleting a room, which we would never do
# while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME`
# lock.
async with self._worker_lock_handler.acquire_read_write_lock(
DELETE_ROOM_LOCK_NAME, room_id, write=False
):
await self._federation_event_handler.on_receive_pdu(
origin, event
)
except FederationError as e:
# XXX: Ideally we'd inform the remote we failed to process
# the event, but we can't return an error in the transaction

@@ -432,15 +432,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):

PREFIX = FEDERATION_V2_PREFIX

def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)

async def on_PUT(
self,
origin: str,

@@ -653,6 +653,7 @@ class DeviceHandler(DeviceWorkerHandler):
async def store_dehydrated_device(
self,
user_id: str,
device_id: Optional[str],
device_data: JsonDict,
initial_device_display_name: Optional[str] = None,
) -> str:
@@ -661,6 +662,7 @@ class DeviceHandler(DeviceWorkerHandler):

Args:
user_id: the user that we are storing the device for
device_id: device id supplied by client
device_data: the dehydrated device information
initial_device_display_name: The display name to use for the device
Returns:
@@ -668,7 +670,7 @@ class DeviceHandler(DeviceWorkerHandler):
"""
device_id = await self.check_device_registered(
user_id,
None,
device_id,
initial_device_display_name,
)
old_device_id = await self.store.store_dehydrated_device(
@@ -1124,7 +1126,14 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
)

if resync:
await self.multi_user_device_resync([user_id])
# We mark as stale up front in case we get restarted.
await self.store.mark_remote_users_device_caches_as_stale([user_id])
run_as_background_process(
"_maybe_retry_device_resync",
self.multi_user_device_resync,
[user_id],
False,
)
else:
# Simply update the single device, since we know that is the only
# change (because of the single prev_id matching the current cache)

@@ -13,10 +13,11 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Any, Dict
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, Optional

from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
from synapse.api.errors import SynapseError
from synapse.api.errors import Codes, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
@@ -48,6 +49,9 @@ class DeviceMessageHandler:
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
self.is_mine = hs.is_mine
if hs.config.experimental.msc3814_enabled:
self.event_sources = hs.get_event_sources()
self.device_handler = hs.get_device_handler()

# We only need to poke the federation sender explicitly if it's on the
# same instance. Other federation sender instances will get notified by
@@ -303,3 +307,103 @@ class DeviceMessageHandler:
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.
self.federation_sender.send_device_messages(destination)

async def get_events_for_dehydrated_device(
self,
requester: Requester,
device_id: str,
since_token: Optional[str],
limit: int,
) -> JsonDict:
"""Fetches up to `limit` events sent to `device_id` starting from `since_token`
and returns the new since token. If there are no more messages, returns an empty
array.

Args:
requester: the user requesting the messages
device_id: ID of the dehydrated device
since_token: stream id to start from when fetching messages
limit: the number of messages to fetch
Returns:
A dict containing the to-device messages, as well as a token that the client
can provide in the next call to fetch the next batch of messages
"""

user_id = requester.user.to_string()

# only allow fetching messages for the dehydrated device id currently associated
# with the user
dehydrated_device = await self.device_handler.get_dehydrated_device(user_id)
if dehydrated_device is None:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"No dehydrated device exists",
Codes.FORBIDDEN,
)

dehydrated_device_id, _ = dehydrated_device
if device_id != dehydrated_device_id:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"You may only fetch messages for your dehydrated device",
Codes.FORBIDDEN,
)

since_stream_id = 0
if since_token:
if not since_token.startswith("d"):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"from parameter %r has an invalid format" % (since_token,),
errcode=Codes.INVALID_PARAM,
)

try:
since_stream_id = int(since_token[1:])
except Exception:
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"from parameter %r has an invalid format" % (since_token,),
errcode=Codes.INVALID_PARAM,
)

# if we have a since token, delete any to-device messages before that token
# (since we now know that the device has received them)
deleted = await self.store.delete_messages_for_device(
user_id, device_id, since_stream_id
)
logger.debug(
"Deleted %d to-device messages up to %d for user_id %s device_id %s",
deleted,
since_stream_id,
user_id,
device_id,
)

to_token = self.event_sources.get_current_token().to_device_key

messages, stream_id = await self.store.get_messages_for_device(
user_id, device_id, since_stream_id, to_token, limit
)

for message in messages:
# Remove the message id before sending to client
message_id = message.pop("message_id", None)
if message_id:
set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)

logger.debug(
"Returning %d to-device messages between %d and %d (current token: %d) for "
"dehydrated device %s, user_id %s",
len(messages),
since_stream_id,
stream_id,
to_token,
device_id,
user_id,
)

return {
"events": messages,
"next_batch": f"d{stream_id}",
}
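The since/next_batch tokens used above are simply the to-device stream position behind a "d" prefix. A sketch of the round-trip (helper names are ours):

def parse_since_token(token: str) -> int:
    if not token.startswith("d"):
        raise ValueError("from parameter %r has an invalid format" % (token,))
    return int(token[1:])

def make_next_batch(stream_id: int) -> str:
    return f"d{stream_id}"

parse_since_token(make_next_batch(42))  # -> 42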
|
||||
|
||||
@@ -277,7 +277,9 @@ class DirectoryHandler:
|
||||
except RequestSendFailed:
|
||||
raise SynapseError(502, "Failed to fetch alias")
|
||||
except CodeMessageException as e:
|
||||
logging.warning("Error retrieving alias")
|
||||
logging.warning(
|
||||
"Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
|
||||
)
|
||||
if e.code == 404:
|
||||
fed_result = None
|
||||
else:
|
||||
|
||||
@@ -277,7 +277,7 @@ class EventAuthHandler:
             True if the proper room version and join rules are set for restricted access.
         """
         # This only applies to room versions which support the new join rule.
-        if not room_version.msc3083_join_rules:
+        if not room_version.restricted_join_rule:
             return False

         # If there's no join rule, then it defaults to invite (so this doesn't apply).
@@ -292,7 +292,7 @@ class EventAuthHandler:
             return True

         # also check for MSC3787 behaviour
-        if room_version.msc3787_knock_restricted_join_rule:
+        if room_version.knock_restricted_join_rule:
             return content_join_rule == JoinRules.KNOCK_RESTRICTED

         return False
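The two EventAuthHandler hunks above and the FederationHandler hunk below swap the experimental msc3083/msc3787 attribute names for their stable counterparts without changing behaviour. Condensed into a stand-alone sketch, with a dataclass standing in for Synapse's RoomVersion (the flag names come from the diff; the function and literals are illustrative):

from dataclasses import dataclass


@dataclass(frozen=True)
class RoomVersionFlags:
    # Stand-ins for the RoomVersion attributes referenced above.
    restricted_join_rule: bool = False
    knock_restricted_join_rule: bool = False


def allows_restricted_access(flags: RoomVersionFlags, content_join_rule: str) -> bool:
    # Restricted access only applies to room versions supporting the join rule.
    if not flags.restricted_join_rule:
        return False
    if content_join_rule == "restricted":
        return True
    # "knock_restricted" additionally needs its own room-version support.
    if flags.knock_restricted_join_rule:
        return content_join_rule == "knock_restricted"
    return False


assert allows_restricted_access(RoomVersionFlags(restricted_join_rule=True), "restricted")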
@@ -957,7 +957,7 @@ class FederationHandler:
         # Note that this requires the /send_join request to come back to the
         # same server.
         prev_event_ids = None
-        if room_version.msc3083_join_rules:
+        if room_version.restricted_join_rule:
             # Note that the room's state can change out from under us and render our
             # nice join rules-conformant event non-conformant by the time we build the
             # event. When this happens, our validation at the end fails and we respond
@@ -1581,9 +1581,7 @@ class FederationHandler:
             event.content["third_party_invite"]["signed"]["token"],
         )
         original_invite = None
-        prev_state_ids = await context.get_prev_state_ids(
-            StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
-        )
+        prev_state_ids = await context.get_prev_state_ids(StateFilter.from_types([key]))
         original_invite_id = prev_state_ids.get(key)
         if original_invite_id:
             original_invite = await self.store.get_event(
@@ -1636,7 +1634,7 @@ class FederationHandler:
             token = signed["token"]

             prev_state_ids = await context.get_prev_state_ids(
-                StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
+                StateFilter.from_types([(EventTypes.ThirdPartyInvite, token)])
             )
             invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))
@@ -53,6 +53,7 @@ from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
 from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
 from synapse.events.validator import EventValidator
 from synapse.handlers.directory import DirectoryHandler
+from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -485,6 +486,7 @@ class EventCreationHandler:
         self._events_shard_config = self.config.worker.events_shard_config
         self._instance_name = hs.get_instance_name()
         self._notifier = hs.get_notifier()
+        self._worker_lock_handler = hs.get_worker_locks_handler()

         self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
@@ -559,8 +561,6 @@ class EventCreationHandler:
             expiry_ms=30 * 60 * 1000,
         )

-        self._msc3970_enabled = hs.config.experimental.msc3970_enabled
-
     async def create_event(
         self,
         requester: Requester,
@@ -738,7 +738,7 @@ class EventCreationHandler:
                 prev_event_id = state_map.get((EventTypes.Member, event.sender))
             else:
                 prev_state_ids = await unpersisted_context.get_prev_state_ids(
-                    StateFilter.from_types([(EventTypes.Member, None)])
+                    StateFilter.from_types([(EventTypes.Member, event.sender)])
                 )
                 prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
             prev_event = (
@@ -860,7 +860,7 @@ class EventCreationHandler:
             return None

         prev_state_ids = await context.get_prev_state_ids(
-            StateFilter.from_types([(event.type, None)])
+            StateFilter.from_types([(event.type, event.state_key)])
         )
         prev_event_id = prev_state_ids.get((event.type, event.state_key))
         if not prev_event_id:
@@ -876,6 +876,53 @@ class EventCreationHandler:
             return prev_event
         return None

+    async def get_event_id_from_transaction(
+        self,
+        requester: Requester,
+        txn_id: str,
+        room_id: str,
+    ) -> Optional[str]:
+        """For the given transaction ID and room ID, check if there is a matching event ID.
+
+        Args:
+            requester: The requester making the request in the context of which we want
+                to fetch the event.
+            txn_id: The transaction ID.
+            room_id: The room ID.
+
+        Returns:
+            An event ID if one could be found, None otherwise.
+        """
+        existing_event_id = None
+
+        # According to the spec, transactions are scoped to a user's device ID.
+        if requester.device_id:
+            existing_event_id = (
+                await self.store.get_event_id_from_transaction_id_and_device_id(
+                    room_id,
+                    requester.user.to_string(),
+                    requester.device_id,
+                    txn_id,
+                )
+            )
+            if existing_event_id:
+                return existing_event_id
+
+        # Some requesters don't have device IDs (appservice, guests, and access
+        # tokens minted with the admin API), fallback to checking the access token
+        # ID, which should be close enough.
+        if requester.access_token_id:
+            existing_event_id = (
+                await self.store.get_event_id_from_transaction_id_and_token_id(
+                    room_id,
+                    requester.user.to_string(),
+                    requester.access_token_id,
+                    txn_id,
+                )
+            )
+
+        return existing_event_id
+
     async def get_event_from_transaction(
         self,
         requester: Requester,
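The new helper encodes a two-step scoping rule for transaction-ID deduplication: look up by device ID first (the spec scopes transactions to a device), then fall back to the access-token ID for requesters that have none. Here is the same rule as a minimal sketch outside Synapse, with plain dicts standing in for the two storage lookups (all names below are illustrative, not Synapse APIs):

from typing import Dict, Optional, Tuple

# Keyed by (room_id, user_id, scope_id, txn_id), where scope_id is either a
# device ID or an access-token ID, mirroring the two store methods above.
TxnMap = Dict[Tuple[str, str, str, str], str]


def get_event_id_for_txn(
    txns_by_device: TxnMap,
    txns_by_token: TxnMap,
    room_id: str,
    user_id: str,
    txn_id: str,
    device_id: Optional[str] = None,
    access_token_id: Optional[str] = None,
) -> Optional[str]:
    # Transactions are scoped to the user's device ID first...
    if device_id:
        event_id = txns_by_device.get((room_id, user_id, device_id, txn_id))
        if event_id:
            return event_id
    # ...falling back to the access-token ID for requesters without one.
    if access_token_id:
        return txns_by_token.get((room_id, user_id, access_token_id, txn_id))
    return None


by_device = {("!room:hs", "@alice:hs", "DEVICE1", "txn1"): "$event_a"}
assert get_event_id_for_txn(by_device, {}, "!room:hs", "@alice:hs", "txn1", "DEVICE1") == "$event_a"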
@@ -894,35 +941,11 @@ class EventCreationHandler:
         Returns:
             An event if one could be found, None otherwise.
         """
-
-        if self._msc3970_enabled and requester.device_id:
-            # When MSC3970 is enabled, we lookup for events sent by the same device first,
-            # and fallback to the old behaviour if none were found.
-            existing_event_id = (
-                await self.store.get_event_id_from_transaction_id_and_device_id(
-                    room_id,
-                    requester.user.to_string(),
-                    requester.device_id,
-                    txn_id,
-                )
-            )
-            if existing_event_id:
-                return await self.store.get_event(existing_event_id)
-
-        # Pre-MSC3970, we looked up for events that were sent by the same session by
-        # using the access token ID.
-        if requester.access_token_id:
-            existing_event_id = (
-                await self.store.get_event_id_from_transaction_id_and_token_id(
-                    room_id,
-                    requester.user.to_string(),
-                    requester.access_token_id,
-                    txn_id,
-                )
-            )
-            if existing_event_id:
-                return await self.store.get_event(existing_event_id)
-
+        existing_event_id = await self.get_event_id_from_transaction(
+            requester, txn_id, room_id
+        )
+        if existing_event_id:
+            return await self.store.get_event(existing_event_id)
         return None

     async def create_and_send_nonmember_event(
@@ -1010,6 +1033,37 @@ class EventCreationHandler:
             event.internal_metadata.stream_ordering,
         )

+        async with self._worker_lock_handler.acquire_read_write_lock(
+            DELETE_ROOM_LOCK_NAME, room_id, write=False
+        ):
+            return await self._create_and_send_nonmember_event_locked(
+                requester=requester,
+                event_dict=event_dict,
+                allow_no_prev_events=allow_no_prev_events,
+                prev_event_ids=prev_event_ids,
+                state_event_ids=state_event_ids,
+                ratelimit=ratelimit,
+                txn_id=txn_id,
+                ignore_shadow_ban=ignore_shadow_ban,
+                outlier=outlier,
+                depth=depth,
+            )
+
+    async def _create_and_send_nonmember_event_locked(
+        self,
+        requester: Requester,
+        event_dict: dict,
+        allow_no_prev_events: bool = False,
+        prev_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
+        ratelimit: bool = True,
+        txn_id: Optional[str] = None,
+        ignore_shadow_ban: bool = False,
+        outlier: bool = False,
+        depth: Optional[int] = None,
+    ) -> Tuple[EventBase, int]:
+        room_id = event_dict["room_id"]
+
         # If we don't have any prev event IDs specified then we need to
         # check that the host is in the room (as otherwise populating the
         # prev events will fail), at which point we may as well check the
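Note the locking pattern introduced here: event sending takes the room's delete-room lock with write=False, so any number of senders can hold it concurrently, while a room deletion takes it with write=True and excludes them all. Synapse's WorkerLocksHandler coordinates this across workers through the database; the toy single-process analogue below, built on asyncio, only illustrates the reader/writer semantics the call sites rely on (a sketch, not the real implementation, and it makes no attempt at writer fairness):

import asyncio
from contextlib import asynccontextmanager


class SimpleRWLock:
    """Toy in-process read/write lock: many readers or one writer."""

    def __init__(self) -> None:
        self._readers = 0  # -1 means a writer holds the lock
        self._cond = asyncio.Condition()

    @asynccontextmanager
    async def acquire(self, write: bool):
        async with self._cond:
            if write:
                # A writer needs the lock to be completely free.
                await self._cond.wait_for(lambda: self._readers == 0)
                self._readers = -1
            else:
                # Readers only wait while a writer holds the lock.
                await self._cond.wait_for(lambda: self._readers >= 0)
                self._readers += 1
        try:
            yield
        finally:
            async with self._cond:
                self._readers = 0 if write else self._readers - 1
                self._cond.notify_all()


# Event sending would use acquire(write=False); room deletion acquire(write=True).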
@@ -1565,12 +1619,11 @@ class EventCreationHandler:
         if state_entry.state_group in self._external_cache_joined_hosts_updates:
             return

-        state = await state_entry.get_state(
-            self._storage_controllers.state, StateFilter.all()
-        )
         with opentracing.start_active_span("get_joined_hosts"):
-            joined_hosts = await self.store.get_joined_hosts(
-                event.room_id, state, state_entry
+            joined_hosts = (
+                await self._storage_controllers.state.get_joined_hosts(
+                    event.room_id, state_entry
+                )
             )

         # Note that the expiry times must be larger than the expiry time in
@@ -1924,7 +1977,10 @@ class EventCreationHandler:
         )

         for room_id in room_ids:
-            dummy_event_sent = await self._send_dummy_event_for_room(room_id)
+            async with self._worker_lock_handler.acquire_read_write_lock(
+                DELETE_ROOM_LOCK_NAME, room_id, write=False
+            ):
+                dummy_event_sent = await self._send_dummy_event_for_room(room_id)

             if not dummy_event_sent:
                 # Did not find a valid user in the room, so remove from future attempts
@@ -46,6 +46,11 @@ logger = logging.getLogger(__name__)
 BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3


+PURGE_HISTORY_LOCK_NAME = "purge_history_lock"
+
+DELETE_ROOM_LOCK_NAME = "delete_room_lock"
+
+
 @attr.s(slots=True, auto_attribs=True)
 class PurgeStatus:
     """Object tracking the status of a purge request
@@ -142,6 +147,7 @@ class PaginationHandler:
         self._server_name = hs.hostname
         self._room_shutdown_handler = hs.get_room_shutdown_handler()
         self._relations_handler = hs.get_relations_handler()
+        self._worker_locks = hs.get_worker_locks_handler()

         self.pagination_lock = ReadWriteLock()
         # IDs of rooms in which there is currently an active purge *or delete* operation.
@@ -356,7 +362,9 @@ class PaginationHandler:
         """
         self._purges_in_progress_by_room.add(room_id)
         try:
-            async with self.pagination_lock.write(room_id):
+            async with self._worker_locks.acquire_read_write_lock(
+                PURGE_HISTORY_LOCK_NAME, room_id, write=True
+            ):
                 await self._storage_controllers.purge_events.purge_history(
                     room_id, token, delete_local_events
                 )
@@ -412,7 +420,10 @@ class PaginationHandler:
             room_id: room to be purged
             force: set true to skip checking for joined users.
         """
-        async with self.pagination_lock.write(room_id):
+        async with self._worker_locks.acquire_multi_read_write_lock(
+            [(PURGE_HISTORY_LOCK_NAME, room_id), (DELETE_ROOM_LOCK_NAME, room_id)],
+            write=True,
+        ):
             # first check that we have no users in this room
             if not force:
                 joined = await self.store.is_host_joined(room_id, self._server_name)
@@ -471,7 +482,9 @@ class PaginationHandler:

         room_token = from_token.room_key

-        async with self.pagination_lock.read(room_id):
+        async with self._worker_locks.acquire_read_write_lock(
+            PURGE_HISTORY_LOCK_NAME, room_id, write=False
+        ):
             (membership, member_event_id) = (None, None)
             if not use_admin_priviledge:
                 (
@@ -747,7 +760,9 @@ class PaginationHandler:

         self._purges_in_progress_by_room.add(room_id)
         try:
-            async with self.pagination_lock.write(room_id):
-                self._delete_by_id[
-                    delete_id
+            async with self._worker_locks.acquire_read_write_lock(
+                PURGE_HISTORY_LOCK_NAME, room_id, write=True
+            ):
+                self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
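purge_room is the interesting case above: it must hold both the purge-history lock and the delete-room lock for the room at once. Taking several named locks is deadlock-prone if two tasks acquire them in different orders, which is presumably why a single acquire_multi_read_write_lock call exists. A toy asyncio illustration of the fixed-ordering discipline such a primitive can use (not Synapse's actual implementation, which coordinates locks through the database):

import asyncio
from contextlib import asynccontextmanager
from typing import Dict, Iterable, Tuple

_locks: Dict[Tuple[str, str], asyncio.Lock] = {}


@asynccontextmanager
async def acquire_multi(names: Iterable[Tuple[str, str]]):
    # Always acquire in a canonical (sorted) order: if one task grabbed the
    # purge lock then the delete lock while another did the reverse, the two
    # could deadlock. A fixed global order rules that out.
    held = []
    try:
        for key in sorted(names):
            lock = _locks.setdefault(key, asyncio.Lock())
            await lock.acquire()
            held.append(lock)
        yield
    finally:
        for lock in reversed(held):
            lock.release()


# Usage mirroring the call site above:
#     async with acquire_multi([("purge_history_lock", room_id),
#                               ("delete_room_lock", room_id)]):
#         ... purge the room ...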
@@ -95,13 +95,12 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time",
 get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])

 notify_reason_counter = Counter(
-    "synapse_handler_presence_notify_reason", "", ["reason"]
+    "synapse_handler_presence_notify_reason", "", ["locality", "reason"]
 )
 state_transition_counter = Counter(
-    "synapse_handler_presence_state_transition", "", ["from", "to"]
+    "synapse_handler_presence_state_transition", "", ["locality", "from", "to"]
 )


 # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
 # "currently_active"
 LAST_ACTIVE_GRANULARITY = 60 * 1000
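With prometheus_client, adding the "locality" label means every .labels() call must now supply values for all declared labels, in declaration order, which is exactly why each call site in the hunks below gains a user_location argument. A self-contained example of the declaration and call shape (the counter name here is a demo stand-in):

from prometheus_client import Counter

# Same shape as the presence counter above, with the extra "locality" label.
demo_notify_reason = Counter(
    "demo_presence_notify_reason",
    "Why a presence change was notified (demo)",
    ["locality", "reason"],
)

# Label values are positional and must cover every declared label:
demo_notify_reason.labels("local", "status_msg_change").inc()
demo_notify_reason.labels("remote", "state_change").inc()

# demo_notify_reason.labels("state_change") would raise ValueError (incorrect
# label count) -- the failure mode this kind of change introduces if any call
# site is missed.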
@@ -567,8 +566,8 @@ class WorkerPresenceHandler(BasePresenceHandler):
             for new_state in states:
                 old_state = self.user_to_current_state.get(new_state.user_id)
                 self.user_to_current_state[new_state.user_id] = new_state
-
-                if not old_state or should_notify(old_state, new_state):
+                is_mine = self.is_mine_id(new_state.user_id)
+                if not old_state or should_notify(old_state, new_state, is_mine):
                     state_to_notify.append(new_state)

             stream_id = token
@@ -1499,23 +1498,31 @@ class PresenceHandler(BasePresenceHandler):
         )


-def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
+def should_notify(
+    old_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool
+) -> bool:
     """Decides if a presence state change should be sent to interested parties."""
+    user_location = "remote"
+    if is_mine:
+        user_location = "local"
+
     if old_state == new_state:
         return False

     if old_state.status_msg != new_state.status_msg:
-        notify_reason_counter.labels("status_msg_change").inc()
+        notify_reason_counter.labels(user_location, "status_msg_change").inc()
         return True

     if old_state.state != new_state.state:
-        notify_reason_counter.labels("state_change").inc()
-        state_transition_counter.labels(old_state.state, new_state.state).inc()
+        notify_reason_counter.labels(user_location, "state_change").inc()
+        state_transition_counter.labels(
+            user_location, old_state.state, new_state.state
+        ).inc()
         return True

     if old_state.state == PresenceState.ONLINE:
         if new_state.currently_active != old_state.currently_active:
-            notify_reason_counter.labels("current_active_change").inc()
+            notify_reason_counter.labels(user_location, "current_active_change").inc()
             return True

         if (
@@ -1524,12 +1531,16 @@ def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
         ):
             # Only notify about last active bumps if we're not currently active
             if not new_state.currently_active:
-                notify_reason_counter.labels("last_active_change_online").inc()
+                notify_reason_counter.labels(
+                    user_location, "last_active_change_online"
+                ).inc()
                 return True

     elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
         # Always notify for a transition where last active gets bumped.
-        notify_reason_counter.labels("last_active_change_not_online").inc()
+        notify_reason_counter.labels(
+            user_location, "last_active_change_not_online"
+        ).inc()
         return True

     return False
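Since every branch of should_notify now fans out on user_location as well, a compact way to review the decision order is to strip it down to the reason strings alone. The sketch below uses a tiny stand-in for UserPresenceState and hard-codes the 60-second granularity; it illustrates the branch order above and is not Synapse code:

from dataclasses import dataclass
from typing import Optional

LAST_ACTIVE_GRANULARITY_MS = 60 * 1000


@dataclass(frozen=True)
class MiniState:  # stand-in for UserPresenceState
    state: str = "offline"
    status_msg: Optional[str] = None
    currently_active: bool = False
    last_active_ts: int = 0


def notify_reason(old: MiniState, new: MiniState) -> Optional[str]:
    """Return the counter "reason" label the change would increment, if any."""
    if old == new:
        return None
    if old.status_msg != new.status_msg:
        return "status_msg_change"
    if old.state != new.state:
        return "state_change"
    if old.state == "online":
        if new.currently_active != old.currently_active:
            return "current_active_change"
        if (
            new.last_active_ts - old.last_active_ts > LAST_ACTIVE_GRANULARITY_MS
            and not new.currently_active
        ):
            return "last_active_change_online"
    elif new.last_active_ts - old.last_active_ts > LAST_ACTIVE_GRANULARITY_MS:
        return "last_active_change_not_online"
    return None


assert notify_reason(MiniState(), MiniState(state="online")) == "state_change"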
@@ -1989,7 +2000,7 @@ def handle_update(
     )

     # Check whether the change was something worth notifying about
-    if should_notify(prev_state, new_state):
+    if should_notify(prev_state, new_state, is_mine):
         new_state = new_state.copy_and_replace(last_federation_update_ts=now)
         persist_and_notify = True
@@ -68,7 +68,7 @@ class ProfileHandler:

         if self.hs.is_mine(target_user):
             profileinfo = await self.store.get_profileinfo(target_user)
-            if profileinfo.display_name is None:
+            if profileinfo.display_name is None and profileinfo.avatar_url is None:
                 raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)

             return {
@@ -163,7 +163,7 @@ class ProfileHandler:
                 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
             )

-        displayname_to_set: Optional[str] = new_displayname
+        displayname_to_set: Optional[str] = new_displayname.strip()
         if new_displayname == "":
             displayname_to_set = None
@@ -143,15 +143,10 @@ class RegistrationHandler:
         assigned_user_id: Optional[str] = None,
         inhibit_user_in_use_error: bool = False,
     ) -> None:
-        if types.contains_invalid_mxid_characters(
-            localpart, self.hs.config.experimental.msc4009_e164_mxids
-        ):
-            extra_chars = (
-                "=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./"
-            )
+        if types.contains_invalid_mxid_characters(localpart):
             raise SynapseError(
                 400,
-                f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'",
+                "User ID can only contain characters a-z, 0-9, or '=_-./+'",
                 Codes.INVALID_USERNAME,
             )
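The simplified error message corresponds to a fixed allowed-character set for the localpart. A hypothetical regex equivalent of the check, for illustration only (the real types.contains_invalid_mxid_characters lives in synapse.types and may differ in detail):

import re

# A localpart is valid iff it only uses a-z, 0-9 and the punctuation listed
# in the error message above.
_VALID_LOCALPART = re.compile(r"^[a-z0-9=_\-./+]+$")


def contains_invalid_mxid_characters(localpart: str) -> bool:
    return _VALID_LOCALPART.fullmatch(localpart) is None


assert not contains_invalid_mxid_characters("alice+test")
assert contains_invalid_mxid_characters("Alice")  # upper case is rejected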
Some files were not shown because too many files have changed in this diff.