Compare commits

Comparing shay/add_c...v1.92.0 (265 commits)
Commit SHA1s (265; the author and date columns of the original table were empty):

4a5bf74372, efe778a0b8, 7e98d382f9, fd50a9b47c, 55c20da4a3, 9de615b3aa, c9282baf03, 1940d990a3, a2b8814d64, 79aa26936f,
d77154be01, 0425dd28f4, d35bed8369, dcb2778341, 721346631e, f84baecb6f, 1cd0715a0f, 748c38921c, 4382d57640, 8065eea6c7,
e9eb26e3af, dcd3698e1f, b85c3485b1, 93f2fdd8d1, 6525fd65ee, ed5e8a77ca, 3de82bb2af, a2e0d4cd60, 05d824526a, 8c56e18e47,
ebd8374fb5, 62a1a9be52, e9235d92f2, 9ec3da06da, 001fc7bd19, 63b51ef3fb, 2d72367367, 692ee2af19, 40901af5e0, 1bf143699c,
501da8ecd8, 224c2bbcfa, 4379d3ef63, 1511a55539, c0bbad8a96, 743860e6a6, e54c1d4ed3, 84f441f88f, ed6de4b2d4, 82699428e3,
fcf7a5759e, a8a46b1336, 5c9402b9fd, daf11e26ef, 5856a8ba42, aeeca2a62e, efdb87c898, 5427cc20b9, e691243e19, 0538e3e2db,
e3333bacff, 851cbdcb57, 33fa82a34c, 23f88f9c59, 020ff1afe3, 7064b4bcf3, 18279631e9, 85118420a2, ec662bbe41, 4adaba9acf,
7cd79ce051, 86ecd341ec, 873971a8b9, da162cbe4e, 19a1cda084, dffe095642, 3b3fed7229, 3f17178728, 803f63df1c, 0ba17777be,
69048f7b48, 8aa5479986, b657e89005, bc72d803d5, 6d7c63fcc6, 7dbac123f9, d6ae4041a4, 358896e1b8, 79c349dfb8, 1e5a0e07a7,
35d260d065, 07c0875aa5, 406ff3eb62, bd558a6dc3, 2d15e39684, 54317d34b7, 6130afb862, 0aba4a4eaa, 54a51ff6c1, eb0dbab15b,
0377cb4fab, 8a4fb7a6ba, 8c3bcea2da, 4513b36a75, 47c629bb27, ad3f43be9a, 4347473946, 29638220ab, 837f28ce74, 4ce32ade5a,
6fc411c7bf, e21ff0f048, b80ff1602e, d834a80a12, 9ff84bccbb, 614efc488b, 7f4b413690, efd4d06d76, dac97642e4, 0328b56468,
4581809846, 3dfe5c0270, 8e09b8aecb, a476d5048b, f3dc6dc19f, 8af3f33d84, 81a6f8c9ae, 9d3713d6d5, b57630c507, 340f08c6f7,
8da3c2185b, eca592b121, 34b5db1fbc, ec8499206e, 4f6da0dba0, 84ae2e3f6f, d98a43d922, 0a5f4f7665, f0a860908b, 9c462f18a4,
4f5bccbbba, 01a45869f0, ca5d5de79b, a51b0862a1, 8fe1fd906a, 90ad836ed8, 5eb3fd785b, 7cbb2a00d1, a4102d2a5f, 190c990a76,
b7695ac388, 1fb5a7ad5d, fa2c116bef, e02f4b7de2, 21407c6709, ae55cc1e6b, 0c6142c4a1, fee0195b27, 76b2218599, ea4ece3fcc,
68b2611783, a719b703d9, a461f1f846, f9f3e89354, f98f4f2e16, 58f8305114, 96529c4236, 6dc019d9dd, 8d2a5586f7, 76e392b0fa,
d4ea465496, 8ebfd577e2, dbee081d14, 99b7b801c3, 641ff9ef7e, 05f8dada8b, 654902a758, 4a711bf379, fc566cdf0a, 3b6208b835,
3b8348b06e, 5c7364fea5, f08d05dd2c, e1fa42249c, fc1e534e41, 835174180b, fd44053b84, ad52db3b5c, 67f9e5293e, 19796e20aa,
40a3583ba1, cb6e2c6cc7, 8e8431bc6e, 69699a9bd1, 6d81aec09f, e625c3dca0, 199c270947, 1c802de626, c692283751, 43ee5d5bac,
1768dd3c27, 0d522b58a6, b0e66721a5, 6396527015, d2f46ae370, 85e0541db1, cba2df20b5, 8d3656b994, 20ae617d14, 2cacd0849a,
204b66c203, 5bdf01fccd, 36c6b92bfc, 8eb7bb975e, 3bdb9b07fd, 0371a354cf, ae391db777, d7fc87d973, 224ef0b669, a4243183f0,
92014fbf72, 4ccfa16081, 7c7bd9898b, b516d91999, 2328e90fbb, 5e82b07d2c, c9bf644fa0, a704a35dd7, e55a9b3e41, 6774f265b4,
6e731e86bf, c971698bff, 7477f43fd8, 3710fea19d, df8c8a4f45, 8a529e4fb6, f25b0f8808, 677272caed, 2481b7dfa4, f19dd39dfc,
b07b14b494, 561d06b481, 39d131b016, ce857c05d5, cc780b3f77, 4cf9f92f39, 95a96b21eb, c303eca8cc, c8e81898b6, 861752b3aa,
1294d10c70, 718d7dfef2, 664ba14080, 649848627c, 670d590f8a, 07d7cbfe69, cd8b73aa97, 53aa26eddc, a587de96b8, 411ba44790,
aea94ca8cd, 9345361c6b, 13fc89148c, 10ed3e233e, 472c2c72f6
```diff
@@ -29,11 +29,12 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

 # First calculate the various trial jobs.
 #
-# For each type of test we only run on Py3.7 on PRs
+# For PRs, we only run each type of test with the oldest Python version supported (which
+# is Python 3.8 right now)

 trial_sqlite_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "sqlite",
         "extras": "all",
     }
@@ -46,13 +47,12 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.8", "3.9", "3.10", "3.11")
+        for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1")
     )

 trial_postgres_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "postgres",
         "postgres-version": "11",
         "extras": "all",
@@ -71,7 +71,7 @@ if not IS_PR:

 trial_no_extra_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "sqlite",
         "extras": "",
     }
@@ -133,11 +133,6 @@ if not IS_PR:
             "sytest-tag": "testing",
             "postgres": "postgres",
         },
-        {
-            "sytest-tag": "buster",
-            "postgres": "multi-postgres",
-            "workers": "workers",
-        },
     ]
 )
```
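The hunk above gates the size of the CI test matrix on whether the build is a pull request. A condensed, hypothetical sketch of that logic (the real script computes several more job lists):

```python
import json
import os

IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# PRs are tested only against the oldest supported Python (3.8 after this
# change); full runs extend the matrix with the newer interpreters.
trial_sqlite_tests = [
    {"python-version": "3.8", "database": "sqlite", "extras": "all"}
]
if not IS_PR:
    trial_sqlite_tests.extend(
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1")
    )

# Job matrices are typically handed back to GitHub Actions as JSON.
print(json.dumps(trial_sqlite_tests))
```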
.github/workflows/docker.yml (vendored, 14 changed lines)

```diff
@@ -29,6 +29,16 @@ jobs:
       - name: Inspect builder
         run: docker buildx inspect

+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Extract version from pyproject.toml
+        # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
+        # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
+        shell: bash
+        run: |
+          echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
+
       - name: Log in to DockerHub
         uses: docker/login-action@v2
         with:
@@ -61,7 +71,9 @@ jobs:
         uses: docker/build-push-action@v4
         with:
           push: true
-          labels: "gitsha1=${{ github.sha }}"
+          labels: |
+            gitsha1=${{ github.sha }}
+            org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
```
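For reference, the same version extraction can be sketched in Python (an illustration only, not part of the workflow):

```python
import re

# Mirror of the grep/sed pipeline above: pull the `version = "..."` value
# out of pyproject.toml and print it.
with open("pyproject.toml") as f:
    for line in f:
        match = re.match(r'version\s*=\s*"([^"]*)"', line)
        if match:
            print(match.group(1))
            break
```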
.github/workflows/latest_deps.yml (vendored, 4 changed lines)

```diff
@@ -57,8 +57,8 @@ jobs:
       # `pip install matrix-synapse[all]` as closely as possible.
       - run: poetry update --no-dev
       - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
-      - name: Remove warn_unused_ignores from mypy config
-        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
+      - name: Remove unhelpful options from mypy config
+        run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
       - run: poetry run mypy
   trial:
     needs: check_repo
```
.github/workflows/release-artifacts.yml (vendored, 2 changed lines)

```diff
@@ -144,7 +144,7 @@ jobs:

       - name: Only build a single wheel on PR
         if: startsWith(github.ref, 'refs/pull/')
-        run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+        run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
```
.github/workflows/tests.yml (vendored, 6 changed lines)

```diff
@@ -320,7 +320,7 @@ jobs:

       - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.8'

       - name: Prepare old deps
         if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -362,7 +362,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["pypy-3.7"]
+        python-version: ["pypy-3.8"]
         extras: ["all"]

     steps:
@@ -477,7 +477,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - python-version: "3.7"
+          - python-version: "3.8"
            postgres-version: "11"

          - python-version: "3.11"
```
.github/workflows/twisted_trunk.yml (vendored, 15 changed lines)

```diff
@@ -5,6 +5,9 @@ on:
     - cron: 0 8 * * *

   workflow_dispatch:
+    # NB: inputs are only present when this workflow is dispatched manually.
+    # (The default below is the default field value in the form to trigger
+    # a manual dispatch). Otherwise the inputs will evaluate to null.
    inputs:
      twisted_ref:
        description: Commit, branch or tag to checkout from upstream Twisted.
@@ -49,10 +52,10 @@ jobs:
           extras: "all"
       - run: |
           poetry remove twisted
-          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
+          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
           poetry install --no-interaction --extras "all test"
-      - name: Remove warn_unused_ignores from mypy config
-        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
+      - name: Remove unhelpful options from mypy config
+        run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
       - run: poetry run mypy

   trial:
@@ -96,7 +99,11 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     container:
-      image: matrixdotorg/sytest-synapse:buster
+      # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
+      # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
+      # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
+      # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
+      image: matrixdotorg/sytest-synapse:focal
       volumes:
         - ${{ github.workspace }}:/src
```
.gitignore (vendored, 1 changed line)

```diff
@@ -34,6 +34,7 @@ __pycache__/
 /logs
 /media_store/
 /uploads
+/homeserver-config-overrides.d

 # For direnv users
 /.envrc
```
CHANGES.md (3245 changed lines): file diff suppressed because it is too large.
Cargo.lock (generated, 58 changed lines)

```diff
@@ -13,9 +13,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.71"
+version = "1.0.75"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
+checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"

 [[package]]
 name = "arc-swap"
@@ -132,9 +132,9 @@ dependencies = [

 [[package]]
 name = "log"
-version = "0.4.19"
+version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"

 [[package]]
 name = "memchr"
@@ -182,9 +182,9 @@ dependencies = [

 [[package]]
 name = "proc-macro2"
-version = "1.0.52"
+version = "1.0.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
 dependencies = [
  "unicode-ident",
 ]
@@ -229,9 +229,9 @@ dependencies = [

 [[package]]
 name = "pyo3-log"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444"
+checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
 dependencies = [
  "arc-swap",
  "log",
@@ -273,9 +273,9 @@ dependencies = [

 [[package]]
 name = "quote"
-version = "1.0.26"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
 dependencies = [
  "proc-macro2",
 ]
@@ -291,9 +291,21 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.8.4"
+version = "1.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
+checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29"
 dependencies = [
  "aho-corasick",
  "memchr",
+ "regex-automata",
  "regex-syntax",
 ]

+[[package]]
+name = "regex-automata"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629"
+dependencies = [
+ "aho-corasick",
+ "memchr",
@@ -302,9 +314,9 @@ dependencies = [

 [[package]]
 name = "regex-syntax"
-version = "0.7.2"
+version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
+checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"

 [[package]]
 name = "ryu"
@@ -320,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

 [[package]]
 name = "serde"
-version = "1.0.164"
+version = "1.0.188"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
+checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.164"
+version = "1.0.188"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
+checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.10",
+ "syn 2.0.28",
 ]

 [[package]]
 name = "serde_json"
-version = "1.0.99"
+version = "1.0.105"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
+checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360"
 dependencies = [
  "itoa",
  "ryu",
@@ -374,9 +386,9 @@ dependencies = [

 [[package]]
 name = "syn"
-version = "2.0.10"
+version = "2.0.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
+checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
 dependencies = [
  "proc-macro2",
  "quote",
```
```diff
@@ -3,3 +3,4 @@

 [workspace]
 members = ["rust"]
+resolver = "2"
```
```diff
@@ -1 +0,0 @@
-Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`.
@@ -1 +0,0 @@
-Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou.
@@ -1 +0,0 @@
-Improve `/messages` response time by avoiding backfill when we already have messages to return.
@@ -1 +0,0 @@
-Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying.
@@ -1 +0,0 @@
-Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms.
@@ -1 +0,0 @@
-Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983).
@@ -1 +0,0 @@
-Avoid invalidating a cache that was just prefilled.
@@ -1 +0,0 @@
-Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983).
@@ -1 +0,0 @@
-Document `looping_call()` functionality that will wait for the given function to finish before scheduling another.
@@ -1 +0,0 @@
-Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper.
@@ -1 +0,0 @@
-Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`.
@@ -1 +0,0 @@
-Allow for the configuration of max request retries and min/max retry delays in the matrix federation client.
@@ -1 +0,0 @@
-Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend.
@@ -1 +0,0 @@
-Fix Sytest environmental variable evaluation in CI.
@@ -1 +0,0 @@
-Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html).
@@ -1 +0,0 @@
-Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic.
@@ -1 +0,0 @@
-Fix typo in MSC number in faster remote room join architecture doc.
@@ -1 +0,0 @@
-Fix harmless exceptions being printed when running the port DB script.
@@ -1 +0,0 @@
-Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly.
@@ -1 +0,0 @@
-Fix sqlite `user_filters` upgrade introduced in v1.86.0.
@@ -1 +0,0 @@
-Add spam checker module API for logins.
```
```diff
@@ -769,7 +769,7 @@ def main(server_url, identity_server_url, username, token, config_path):
     global CONFIG_JSON
     CONFIG_JSON = config_path  # bit cheeky, but just overwrite the global
     try:
-        with open(config_path, "r") as config:
+        with open(config_path) as config:
             syn_cmd.config = json.load(config)
             try:
                 http_client.verbose = "on" == syn_cmd.config["verbose"]
```
```diff
@@ -63,7 +63,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "enable": true,
-      "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
+      "expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
       "iconColor": "purple",
       "name": "deploys",
       "titleFormat": "Deployed {{version}}"
```
debian/changelog (vendored, 84 changed lines)

```diff
@@ -1,3 +1,87 @@
+matrix-synapse-py3 (1.92.0) stable; urgency=medium
+
+  * New Synapse release 1.92.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Sep 2023 11:59:23 +0200
+
+matrix-synapse-py3 (1.91.2) stable; urgency=medium
+
+  * New synapse release 1.91.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 06 Sep 2023 14:59:30 +0000
+
+matrix-synapse-py3 (1.92.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.92.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Sep 2023 11:21:43 +0100
+
+matrix-synapse-py3 (1.91.1) stable; urgency=medium
+
+  * New Synapse release 1.91.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 04 Sep 2023 14:03:18 +0100
+
+matrix-synapse-py3 (1.91.0) stable; urgency=medium
+
+  * New Synapse release 1.91.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 30 Aug 2023 11:18:10 +0100
+
+matrix-synapse-py3 (1.91.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.91.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 23 Aug 2023 09:47:18 -0700
+
+matrix-synapse-py3 (1.90.0) stable; urgency=medium
+
+  * New Synapse release 1.90.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Aug 2023 11:17:34 +0100
+
+matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.90.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Aug 2023 15:29:34 +0100
+
+matrix-synapse-py3 (1.89.0) stable; urgency=medium
+
+  * New Synapse release 1.89.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Aug 2023 11:07:15 +0100
+
+matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.89.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Jul 2023 14:31:07 +0200
+
+matrix-synapse-py3 (1.88.0) stable; urgency=medium
+
+  * New Synapse release 1.88.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jul 2023 13:59:28 +0100
+
+matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.88.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jul 2023 10:20:19 +0100
+
+matrix-synapse-py3 (1.87.0) stable; urgency=medium
+
+  * New Synapse release 1.87.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Jul 2023 16:24:00 +0100
+
+matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.87.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 27 Jun 2023 15:27:04 +0000
+
 matrix-synapse-py3 (1.86.0) stable; urgency=medium

   * New Synapse release 1.86.0.
```
```diff
@@ -28,12 +28,12 @@ FROM docker.io/library/${distro} as builder

 RUN apt-get update -qq -o Acquire::Languages=none
 RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-  -yqq --no-install-recommends \
-  build-essential \
-  ca-certificates \
-  devscripts \
-  equivs \
-  wget
+    -yqq --no-install-recommends \
+    build-essential \
+    ca-certificates \
+    devscripts \
+    equivs \
+    wget

 # fetch and unpack the package
 # We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
```
```diff
@@ -62,33 +62,29 @@ FROM docker.io/library/${distro}

 ARG distro=""
 ENV distro ${distro}

-# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
-# http://bugs.python.org/issue19846
-ENV LANG C.UTF-8
-
 # Install the build dependencies
 #
 # NB: keep this list in sync with the list of build-deps in debian/control
 # TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
-  -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
-  build-essential \
-  curl \
-  debhelper \
-  devscripts \
-  libsystemd-dev \
-  lsb-release \
-  pkg-config \
-  python3-dev \
-  python3-pip \
-  python3-setuptools \
-  python3-venv \
-  sqlite3 \
-  libpq-dev \
-  libicu-dev \
-  pkg-config \
-  xmlsec1
+    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+    build-essential \
+    curl \
+    debhelper \
+    devscripts \
+    libsystemd-dev \
+    lsb-release \
+    pkg-config \
+    python3-dev \
+    python3-pip \
+    python3-setuptools \
+    python3-venv \
+    sqlite3 \
+    libpq-dev \
+    libicu-dev \
+    pkg-config \
+    xmlsec1

 # Install rust and ensure it's in the PATH
 ENV RUSTUP_HOME=/rust
```
```diff
@@ -35,7 +35,11 @@ server {

     # Send all other traffic to the main process
     location ~* ^(\\/_matrix|\\/_synapse) {
+{% if using_unix_sockets %}
+        proxy_pass http://unix:/run/main_public.sock;
+{% else %}
         proxy_pass http://localhost:8080;
+{% endif %}
         proxy_set_header X-Forwarded-For $remote_addr;
         proxy_set_header X-Forwarded-Proto $scheme;
         proxy_set_header Host $host;
```
```diff
@@ -6,6 +6,9 @@
 {% if enable_redis %}
 redis:
     enabled: true
+{% if using_unix_sockets %}
+    path: /tmp/redis.sock
+{% endif %}
 {% endif %}

 {% if appservice_registrations is not none %}
```
```diff
@@ -19,7 +19,11 @@ username=www-data
 autorestart=true

 [program:redis]
+{% if using_unix_sockets %}
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
+{% else %}
 command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
+{% endif %}
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
```
```diff
@@ -8,7 +8,11 @@ worker_name: "{{ name }}"

 worker_listeners:
   - type: http
+{% if using_unix_sockets %}
+    path: "/run/worker.{{ port }}"
+{% else %}
     port: {{ port }}
+{% endif %}
 {% if listener_resources %}
     resources:
       - names:
```
```diff
@@ -36,12 +36,17 @@ listeners:

   # Allow configuring in case we want to reverse proxy 8008
   # using another process in the same container
+{% if SYNAPSE_USE_UNIX_SOCKET %}
+  # Unix sockets don't care about TLS or IP addresses or ports
+  - path: '/run/main_public.sock'
+    type: http
+{% else %}
   - port: {{ SYNAPSE_HTTP_PORT or 8008 }}
     tls: false
     bind_addresses: ['::']
     type: http
     x_forwarded: false
+{% endif %}
     resources:
       - names: [client]
         compress: true
@@ -57,8 +62,11 @@ database:
     user: "{{ POSTGRES_USER or "synapse" }}"
     password: "{{ POSTGRES_PASSWORD }}"
     database: "{{ POSTGRES_DB or "synapse" }}"
+{% if not SYNAPSE_USE_UNIX_SOCKET %}
+    {# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
     host: "{{ POSTGRES_HOST or "db" }}"
     port: "{{ POSTGRES_PORT or "5432" }}"
+{% endif %}
     cp_min: 5
     cp_max: 10
 {% else %}
```
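The template comment above leans on psycopg2/libpq behaviour: when no host or port is given, the connection goes over the default local Unix socket rather than TCP. A minimal sketch, assuming psycopg2 is installed and a local Postgres cluster with a `synapse` database and role exists:

```python
import psycopg2

# No host/port supplied, so libpq connects via its default Unix domain
# socket instead of TCP, which is the fallback the template relies on.
conn = psycopg2.connect(dbname="synapse", user="synapse")
print(conn.status)
conn.close()
```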
```diff
@@ -74,6 +74,9 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 MAIN_PROCESS_INSTANCE_NAME = "main"
 MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
 MAIN_PROCESS_REPLICATION_PORT = 9093
+# Obviously, these would only be used with the UNIX socket option
+MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
+MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"

 # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
 # during processing with the name of the worker.
@@ -407,11 +410,15 @@ def add_worker_roles_to_shared_config(
         )

         # Map of stream writer instance names to host/ports combos
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
-
+        if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+            instance_map[worker_name] = {
+                "path": f"/run/worker.{worker_port}",
+            }
+        else:
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }
         # Update the list of stream writers. It's convenient that the name of the worker
         # type is the same as the stream to write. Iterate over the whole list in case there
         # is more than one.
@@ -423,10 +430,15 @@ def add_worker_roles_to_shared_config(

         # Map of stream writer instance names to host/ports combos
         # For now, all stream writers need http replication ports
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+        if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+            instance_map[worker_name] = {
+                "path": f"/run/worker.{worker_port}",
+            }
+        else:
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }


 def merge_worker_template_configs(
@@ -718,17 +730,29 @@ def generate_worker_files(
     # Note that yaml cares about indentation, so care should be taken to insert lines
     # into files at the correct indentation below.

+    # Convenience helper for if using unix sockets instead of host:port
+    using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
+
     # First read the original config file and extract the listeners block. Then we'll
     # add another listener for replication. Later we'll write out the result to the
     # shared config file.
-    listeners = [
-        {
-            "port": MAIN_PROCESS_REPLICATION_PORT,
-            "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
-            "type": "http",
-            "resources": [{"names": ["replication"]}],
-        }
-    ]
+    listeners: List[Any]
+    if using_unix_sockets:
+        listeners = [
+            {
+                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+                "type": "http",
+                "resources": [{"names": ["replication"]}],
+            }
+        ]
+    else:
+        listeners = [
+            {
+                "port": MAIN_PROCESS_REPLICATION_PORT,
+                "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
+                "type": "http",
+                "resources": [{"names": ["replication"]}],
+            }
+        ]
     with open(config_path) as file_stream:
         original_config = yaml.safe_load(file_stream)
         original_listeners = original_config.get("listeners")
@@ -769,7 +793,17 @@ def generate_worker_files(

     # A list of internal endpoints to healthcheck, starting with the main process
     # which exists even if no workers do.
-    healthcheck_urls = ["http://localhost:8080/health"]
+    # This list ends up being part of the command line to curl, (curl added support for
+    # Unix sockets in version 7.40).
+    if using_unix_sockets:
+        healthcheck_urls = [
+            f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
+            # The scheme and hostname from the following URL are ignored.
+            # The only thing that matters is the path `/health`
+            "http://localhost/health"
+        ]
+    else:
+        healthcheck_urls = ["http://localhost:8080/health"]

     # Get the set of all worker types that we have configured
     all_worker_types_in_use = set(chain(*requested_worker_types.values()))
```
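As the comment in that hunk notes, the entries are spliced directly into a curl command line, so a Unix-socket entry can carry its own `--unix-socket` flag. A hedged sketch of how such a list might be assembled into a command (the exact invocation used in the image may differ, and the worker port is illustrative):

```python
healthcheck_urls = [
    "--unix-socket /run/main_public.sock http://localhost/health",
    # Hypothetical worker entry; the port number is illustrative only.
    "--unix-socket /run/worker.18009 http://localhost/health",
]
# Each entry is a raw command-line fragment rather than a plain URL, which
# is why the socket flag can ride along (curl >= 7.40 understands it).
command = "curl -fSs " + " ".join(healthcheck_urls)
print(command)
```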
```diff
@@ -806,8 +840,12 @@ def generate_worker_files(
         # given worker_type needs to stay assigned and not be replaced.
         worker_config["shared_extra_conf"].update(shared_config)
         shared_config = worker_config["shared_extra_conf"]

-        healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+        if using_unix_sockets:
+            healthcheck_urls.append(
+                f"--unix-socket /run/worker.{worker_port} http://localhost/health"
+            )
+        else:
+            healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))

         # Update the shared config with sharding-related options if necessary
         add_worker_roles_to_shared_config(
@@ -823,9 +861,10 @@ def generate_worker_files(
         # Then a worker config file
         convert(
             "/conf/worker.yaml.j2",
-            "/conf/workers/{name}.yaml".format(name=worker_name),
+            f"/conf/workers/{worker_name}.yaml",
             **worker_config,
             worker_log_config_filepath=log_config_filepath,
+            using_unix_sockets=using_unix_sockets,
         )

         # Save this worker's port number to the correct nginx upstreams
@@ -846,8 +885,13 @@ def generate_worker_files(
     nginx_upstream_config = ""
     for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
         body = ""
-        for port in upstream_worker_ports:
-            body += f"    server localhost:{port};\n"
+        if using_unix_sockets:
+            for port in upstream_worker_ports:
+                body += f"    server unix:/run/worker.{port};\n"
+
+        else:
+            for port in upstream_worker_ports:
+                body += f"    server localhost:{port};\n"

         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -877,10 +921,15 @@ def generate_worker_files(
     # If there are workers, add the main process to the instance_map too.
     if workers_in_use:
         instance_map = shared_config.setdefault("instance_map", {})
-        instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
-            "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
-            "port": MAIN_PROCESS_REPLICATION_PORT,
-        }
+        if using_unix_sockets:
+            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+            }
+        else:
+            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+                "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
+                "port": MAIN_PROCESS_REPLICATION_PORT,
+            }

     # Shared homeserver config
     convert(
@@ -890,6 +939,7 @@ def generate_worker_files(
         appservice_registrations=appservice_registrations,
         enable_redis=workers_in_use,
         workers_in_use=workers_in_use,
+        using_unix_sockets=using_unix_sockets,
     )

     # Nginx config
@@ -900,6 +950,7 @@ def generate_worker_files(
         upstream_directives=nginx_upstream_config,
         tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
         tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
+        using_unix_sockets=using_unix_sockets,
     )

     # Supervisord config
@@ -909,6 +960,7 @@ def generate_worker_files(
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
         enable_redis=workers_in_use,
+        using_unix_sockets=using_unix_sockets,
     )

     convert(
```
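The same host/port-versus-socket-path decision appears in several hunks above; isolated, it amounts to the following (a standalone sketch, not the script's actual helper):

```python
import os

def instance_entry(worker_port: int) -> dict:
    # When SYNAPSE_USE_UNIX_SOCKET is set, workers are addressed by socket
    # path; otherwise by localhost and their replication port.
    if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
        return {"path": f"/run/worker.{worker_port}"}
    return {"host": "localhost", "port": worker_port}

print(instance_entry(18009))  # the port value is illustrative only
```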
```diff
@@ -82,7 +82,7 @@ def generate_config_from_template(
             with open(filename) as handle:
                 value = handle.read()
         else:
-            log("Generating a random secret for {}".format(secret))
+            log(f"Generating a random secret for {secret}")
             value = codecs.encode(os.urandom(32), "hex").decode()
             with open(filename, "w") as handle:
                 handle.write(value)
```
```diff
@@ -97,6 +97,7 @@
 - [Cancellation](development/synapse_architecture/cancellation.md)
 - [Log Contexts](log_contexts.md)
 - [Replication](replication.md)
+  - [Streams](development/synapse_architecture/streams.md)
 - [TCP Replication](tcp_replication.md)
 - [Faster remote joins](development/synapse_architecture/faster_joins.md)
 - [Internal Documentation](development/internal_documentation/README.md)
```
```diff
@@ -1,5 +1,7 @@
 # Account validity API

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 This API allows a server administrator to manage the validity of an account. To
 use it, you must enable the account validity feature (under
 `account_validity`) in Synapse's configuration.
```
```diff
@@ -1,5 +1,7 @@
 # Shared-Secret Registration

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 This API allows for the creation of users in an administrative and
 non-interactive way. This is generally used for bootstrapping a Synapse
 instance with administrator accounts.
```
````diff
@@ -146,6 +146,7 @@ Body parameters:
 - `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
   granting them access to the Admin API, among other things.
 - `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
+- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.

 Note: the `password` field must also be set if both of the following are true:
 - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
@@ -217,7 +218,9 @@ The following parameters should be set in the URL:
 - `name` - Is optional and filters to only return users with user ID localparts
   **or** displaynames that contain this value.
 - `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users.
-  Defaults to `true` to include guest users.
+  Defaults to `true` to include guest users. This parameter is not supported when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+- `admins` - Optional flag to filter admins. If `true`, only admins are queried. If `false`, admins are excluded from
+  the query. When the flag is absent (the default), **both** admins and non-admins are included in the search results.
 - `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users.
   Defaults to `false` to exclude deactivated users.
 - `limit` - string representing a positive integer - Is optional but is used for pagination,
@@ -239,9 +242,13 @@ The following parameters should be set in the URL:
   - `displayname` - Users are ordered alphabetically by `displayname`.
   - `avatar_url` - Users are ordered alphabetically by avatar URL.
   - `creation_ts` - Users are ordered by when the users was created in ms.
+  - `last_seen_ts` - Users are ordered by when the user was lastly seen in ms.

 - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
   Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+- `not_user_type` - Exclude certain user types, such as bot users, from the request.
+  Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
+  "empty string" here means to exclude users without a type.

 Caution. The database only has indexes on the columns `name` and `creation_ts`.
 This means that if a different sort order is used (`is_guest`, `admin`,
@@ -266,6 +273,7 @@ The following fields are returned in the JSON response body:
 - `displayname` - string - The user's display name if they have set one.
 - `avatar_url` - string - The user's avatar URL if they have set one.
 - `creation_ts` - integer - The user's creation timestamp in ms.
+- `last_seen_ts` - integer - The user's last activity timestamp in ms.

 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of media.
@@ -384,6 +392,8 @@ The following actions are **NOT** performed. The list may be incomplete.

 ## Reset password

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 Changes the password of another user. This will automatically log the user out of all their devices.

 The api is:
@@ -407,6 +417,8 @@ The parameter `logout_devices` is optional and defaults to `true`.

 ## Get whether a user is a server administrator or not

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 The api is:

 ```
@@ -424,6 +436,8 @@ A response body like the following is returned:

 ## Change whether a user is a server administrator or not

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 Note that you cannot demote yourself.

 The api is:
@@ -717,6 +731,8 @@ delete largest/smallest or newest/oldest files first.

 ## Login as a user

+**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
+
 Get an access token that can be used to authenticate as that user. Useful for
 when admins wish to do actions on behalf of a user.

@@ -729,7 +745,8 @@ POST /_synapse/admin/v1/users/<user_id>/login

 An optional `valid_until_ms` field can be specified in the request body as an
 integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
+do not expire. Note that this API does not allow a user to login as themselves
+(to create more tokens).

 A response body like the following is returned:

@@ -1180,7 +1197,7 @@ The following parameters should be set in the URL:
 - `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
   be local.

-### Check username availability
+## Check username availability

 Checks to see if a username is available, and valid, for the server. See [the client-server
 API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1198,7 +1215,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
 The request and response format is the same as the
 [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.

-### Find a user based on their ID in an auth provider
+## Find a user based on their ID in an auth provider

 The API is:

@@ -1237,7 +1254,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
 _Added in Synapse 1.68.0._


-### Find a user based on their Third Party ID (ThreePID or 3PID)
+## Find a user based on their Third Party ID (ThreePID or 3PID)

 The API is:
````
docs/changelogs/CHANGES-2022.md (new file, 2766 lines): file diff suppressed because it is too large.
```diff
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
 (e.g. by using [rustup](https://rustup.rs/)).

 The oldest supported version of SQLite is the version
-[provided](https://packages.debian.org/buster/libsqlite3-0) by
+[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
 [Debian oldstable](https://wiki.debian.org/DebianOldStable).

 Context
```
````diff
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
 configuration:

 ```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
 ```
 (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)

@@ -370,6 +370,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the database
 See the [worker documentation](../workers.md) for additional information on workers.
 - Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
 - Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
+- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).

 To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
 ```sh
````
```diff
@@ -12,7 +12,7 @@ Note that this schedule might be modified depending on the availability of the
 Synapse team, e.g. releases may be skipped to avoid holidays.

 Release announcements can be found in the
-[release category of the Matrix blog](https://matrix.org/blog/category/releases).
+[release category of the Matrix blog](https://matrix.org/category/releases).

 ## Bugfix releases

@@ -34,4 +34,4 @@ be held to be released together.

 In some cases, a pre-disclosure of a security release will be issued as a notice
 to Synapse operators that there is an upcoming security release. These can be
-found in the [security category of the Matrix blog](https://matrix.org/blog/category/security).
+found in the [security category of the Matrix blog](https://matrix.org/category/security).
```
docs/development/synapse_architecture/streams.md (new file, 157 lines), reproduced below with minor typo fixes:

## Streams

Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py
).
Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
For example:

- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).

See [`synapse.replication.tcp.streams`](
https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py
) for the full list of streams.

It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes—especially if those changes are made by different workers.
To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](
https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
).

### Definition

A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.

Each fact has an ID, called its "stream ID".
Readers should only process facts in ascending stream ID order.

Roughly speaking, each stream is backed by a database table.
It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
Typically, a fact is expressed with a single row in its backing table.[^2]
Within a stream, no two facts may have the same `stream_id`.

> _Aside_. Some additional notes on streams' backing tables.
>
> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
> 2. The backing tables may have other uses.
>    For example, the events table backs the events stream, and is read when processing new events.
>    But old rows are read from the table all the time, whenever Synapse needs to look up some facts about an event.
> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.

Stream writers can "reserve" a stream ID, and then later mark it as having been completed.
Stream writers need to track the completion of each stream fact.
In the happy case, completion means a fact has been written to the stream table.
But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
Once completed, the rows written with that stream ID are fixed, and no new rows
will be inserted with that ID.

### Current stream ID

For any given stream reader (including writers themselves), we may define a per-writer current stream ID:

> The current stream ID _for a writer W_ is the largest stream ID such that
> all transactions added by W with equal or smaller ID have completed.

Similarly, there is a "linear" notion of current stream ID:

> The "linear" current stream ID is the largest stream ID such that
> all facts (added by any writer) with equal or smaller ID have completed.

Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.

**NB.** For both senses of "current", note that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.

For single-writer streams, the per-writer current ID and the linear current ID are the same.
Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.

_Example_.
Consider a single-writer stream which is initially at ID 1.

| Action     | Current stream ID | Notes                                           |
|------------|-------------------|-------------------------------------------------|
|            | 1                 |                                                 |
| Reserve 2  | 1                 |                                                 |
| Reserve 3  | 1                 |                                                 |
| Complete 3 | 1                 | current ID unchanged, waiting for 2 to complete |
| Complete 2 | 3                 | current ID jumps from 1 -> 3                    |
| Reserve 4  | 3                 |                                                 |
| Reserve 5  | 3                 |                                                 |
| Reserve 6  | 3                 |                                                 |
| Complete 5 | 3                 |                                                 |
| Complete 4 | 5                 | current ID jumps 3->5, even though 6 is pending |
| Complete 6 | 6                 |                                                 |
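The bookkeeping in the table can be sketched in a few lines of Python (an illustration only, not Synapse's actual ID generator):

```python
class SingleWriterIdTracker:
    """Toy model of the per-writer "current stream ID" described above."""

    def __init__(self, current: int = 1) -> None:
        self.current = current
        self._next = current + 1
        self._pending: set[int] = set()  # reserved but not yet completed

    def reserve(self) -> int:
        stream_id = self._next
        self._next += 1
        self._pending.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        self._pending.discard(stream_id)
        # Advance only while every smaller reserved ID has completed, so the
        # current ID can jump (1 -> 3) but never passes a pending fact.
        while self.current + 1 < self._next and self.current + 1 not in self._pending:
            self.current += 1

tracker = SingleWriterIdTracker()
two, three = tracker.reserve(), tracker.reserve()
tracker.complete(three)
assert tracker.current == 1  # still waiting for 2
tracker.complete(two)
assert tracker.current == 3  # jumps straight to 3
```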
### Multi-writer streams

There are two ways to view a multi-writer stream.

1. Treat it as a collection of distinct single-writer streams, one
   for each writer.
2. Treat it as a single stream.

The single stream (option 2) is conceptually simpler, and easier to represent (a single stream id).
However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer; see the sketch after this section.
In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (a map from writer to stream id).
The payoff for doing so is that readers can "peek" ahead to facts that completed on one writer no matter the state of the others, reducing latency.

Note that a multi-writer stream can be viewed in both ways.
For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
But the background process that works through events treats them as a single linear stream.

Another useful example is the cache invalidation stream.
The facts this stream holds are instructions to "you should now invalidate these cache entries".
We only ever treat this as multiple single-writer streams as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts; and the invalidations commute/are idempotent.)
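One way to see the trade-off: a reader holding per-writer positions can recover a "linear" position as the minimum across writers, at the cost of tracking the full writer set (again an illustrative sketch; the writer names and positions are made up):

```python
per_writer_current = {"persister1": 340, "persister2": 337}

def linear_current_id(positions: dict[str, int]) -> int:
    # Advancing past min(...) could skip a fact from the slowest writer,
    # which is why the single-stream view needs the full set of writers.
    return min(positions.values())

assert linear_current_id(per_writer_current) == 337
```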
### Writing to streams

Writers need to track:
- their current position (i.e. their own per-writer stream ID); and
- their facts currently awaiting completion.

At startup,
- the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
- there are no facts awaiting completion.

To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.

To write a fact to the stream: insert the appropriate rows to the appropriate backing table.

To complete a fact, first remove it from your map of facts currently awaiting completion.
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.

### Subscribing to streams

Readers need to track the current position of every writer.

At startup, they can find this by contacting each writer with a `REPLICATE` message,
requesting that all writers reply describing their current position in their streams.
Writers reply with a `POSITION` message.

To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.
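The reader's side of that handshake can be summarised as follows (hypothetical handler names; the real protocol machinery lives under `synapse.replication.tcp`):

```python
positions: dict[str, int] = {}  # writer name -> its current stream ID

def on_position(writer: str, stream_id: int) -> None:
    # Startup: adopt the position each writer reports via POSITION.
    positions[writer] = stream_id

def on_rdata(writer: str, stream_id: int) -> None:
    # An RDATA is only a notification; full details of the fact must be
    # read back from the stream's backing table before acting on it.
    positions[writer] = max(positions.get(writer, 0), stream_id)
```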
# Summary

In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.

---

[^1]: we use the word _fact_ here for two reasons.
      Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
      Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.

[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
      In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.

[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
      nowadays it's done via Redis's Pubsub.
@@ -95,7 +95,7 @@ matrix.example.com {
|
||||
}
|
||||
|
||||
example.com:8448 {
|
||||
reverse_proxy localhost:8008
|
||||
reverse_proxy /_matrix/* localhost:8008
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -135,8 +135,8 @@ Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi
|
||||
|
||||
#### ArchLinux
|
||||
|
||||
The quickest way to get up and running with ArchLinux is probably with the community package
|
||||
<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
|
||||
The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
|
||||
<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
|
||||
the necessary dependencies.
|
||||
|
||||
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
|
||||
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
|
||||
System requirements:
|
||||
|
||||
- POSIX-compliant system (tested on Linux & OS X)
|
||||
- Python 3.7 or later, up to Python 3.11.
|
||||
- Python 3.8 or later, up to Python 3.11.
|
||||
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
||||
|
||||
If building on an uncommon architecture for which pre-built wheels are
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
A structured logging system can be useful when your logs are destined for a
|
||||
machine to parse and process. By maintaining its machine-readable characteristics,
|
||||
it enables more efficient searching and aggregations when consumed by software
|
||||
such as the "ELK stack".
|
||||
such as the [ELK stack](https://opensource.com/article/18/9/open-source-log-aggregation-tools).
|
||||
|
||||
Synapse's structured logging system is configured via the file that Synapse's
|
||||
`log_config` config option points to. The file should include a formatter which
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
worker_app: synapse.app.generic_worker
|
||||
worker_name: background_worker
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
worker_app: synapse.app.generic_worker
|
||||
worker_name: event_persister1
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
worker_name: event_persister1
|
||||
|
||||
worker_listeners:
|
||||
- type: http
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
worker_app: synapse.app.federation_sender
|
||||
worker_name: federation_sender1
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
worker_app: synapse.app.media_repository
|
||||
worker_name: media_worker
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_listeners:
|
||||
- type: http
|
||||
port: 8085
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
worker_app: synapse.app.pusher
|
||||
worker_name: pusher_worker1
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
|
||||
|
||||
@@ -87,6 +87,57 @@ process, for example:
|
||||
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
```
|
||||
|
||||
# Upgrading to v1.90.0
|
||||
|
||||
## App service query parameter authorization is now a configuration option
|
||||
|
||||
Synapse v1.81.0 deprecated application service authorization via query parameters as this is
|
||||
considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
|
||||
[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
|
||||
query parameter authorization redundant. Since removing the ability to use query parameters could break
|
||||
backwards compatibility, it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
|
||||
This option defaults to false, but can be activated by adding
|
||||
```yaml
|
||||
use_appservice_legacy_authorization: true
|
||||
```
|
||||
to your configuration.
|
||||
|
||||
# Upgrading to v1.89.0
|
||||
|
||||
## Removal of unspecced `user` property for `/register`
|
||||
|
||||
Application services can no longer call `/register` with a `user` property to create new users.
|
||||
The standard `username` property should be used instead. See the
|
||||
[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
|
||||
for more information.
|
||||
|
||||
# Upgrading to v1.88.0
|
||||
|
||||
## Minimum supported Python version
|
||||
|
||||
The minimum supported Python version has been increased from v3.7 to v3.8.
|
||||
You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).
|
||||
|
||||
If you use current versions of the Matrix.org-distributed Debian
|
||||
packages or Docker images, no action is required.
|
||||
|
||||
## Removal of `worker_replication_*` settings
|
||||
|
||||
As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
|
||||
are being removed in this release of Synapse:
|
||||
|
||||
* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
|
||||
* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
|
||||
* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)
|
||||
|
||||
Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
|
||||
(or create one if necessary). This is required if you have ***any*** workers at all;
|
||||
administrators of single-process (monolith) installations don't need to do anything.
|
||||
|
||||
For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
|
||||
|
||||
|
||||
# Upgrading to v1.86.0
|
||||
|
||||
## Minimum supported Rust version
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# Registration Tokens
|
||||
|
||||
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
|
||||
|
||||
This API allows you to manage tokens which can be used to authenticate
|
||||
registration requests, as proposed in
|
||||
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
|
||||
|
||||
@@ -462,6 +462,20 @@ See the docs [request log format](../administration/request_log.md).
|
||||
* `additional_resources`: Only valid for an 'http' listener. A map of
|
||||
additional endpoints which should be loaded via dynamic modules.
|
||||
|
||||
Unix socket support (_Added in Synapse 1.89.0_):
|
||||
* `path`: A path and filename for a Unix socket. Make sure it is located in a
|
||||
directory with read and write permissions, and that it already exists (the directory
|
||||
will not be created). Defaults to `None`.
|
||||
* **Note**: The `path` and `port` options are mutually exclusive; a single
|
||||
`listener` cannot use both.
|
||||
* The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
|
||||
* Other options that would not make sense to use with a UNIX socket, such as
|
||||
`bind_addresses` and `tls`, will be ignored and can be removed.
|
||||
* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`.
|
||||
* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
|
||||
Also make sure that `metrics` is not included in `resources` -> `names`.
|
||||
|
||||
|
||||
Valid resource names are:
|
||||
|
||||
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
|
||||
@@ -474,7 +488,7 @@ Valid resource names are:
|
||||
|
||||
* `media`: the media API (/_matrix/media).
|
||||
|
||||
* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
|
||||
* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)
|
||||
|
||||
* `openid`: OpenID authentication. See [here](../../openid.md).
|
||||
|
||||
@@ -533,6 +547,22 @@ listeners:
|
||||
bind_addresses: ['::1', '127.0.0.1']
|
||||
type: manhole
|
||||
```
|
||||
Example configuration #3:
|
||||
```yaml
|
||||
listeners:
|
||||
# Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
|
||||
# lightweight interprocess communication without TCP/IP overhead, avoiding port
|
||||
# conflicts, and providing enhanced security through system file permissions.
|
||||
#
|
||||
# Note that x_forwarded will default to true, when using a UNIX socket. Please see
|
||||
# https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
|
||||
#
|
||||
- path: /var/run/synapse/main_public.sock
|
||||
type: http
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
```
|
||||
|
||||
---
|
||||
### `manhole_settings`
|
||||
|
||||
@@ -1212,6 +1242,14 @@ like sending a federation transaction.
|
||||
* `max_short_retries`: maximum number of retries for the short retry algo. Default to 3 attempts.
|
||||
* `max_long_retries`: maximum number of retries for the long retry algo. Default to 10 attempts.
|
||||
|
||||
The following options control the retry logic when communicating with a specific homeserver destination.
|
||||
Unlike the previous configuration options, these values apply across all requests
|
||||
for a given destination and the state of the backoff is stored in the database.
|
||||
|
||||
* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
|
||||
* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent failure. Defaults to 2.
|
||||
* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
federation:
|
||||
@@ -1220,6 +1258,9 @@ federation:
|
||||
max_long_retry_delay: 100s
|
||||
max_short_retries: 5
|
||||
max_long_retries: 20
|
||||
destination_min_retry_interval: 30s
|
||||
destination_retry_multiplier: 5
|
||||
destination_max_retry_interval: 12h
|
||||
```
|
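As a rough sketch of how these three options interact (not Synapse's actual retry code), the per-destination interval grows geometrically from the minimum and is clamped to the maximum:

```python
def next_retry_interval(current_secs: float, multiplier: float, cap_secs: float) -> float:
    # Grow the backoff geometrically, clamped to the configured cap.
    return min(current_secs * multiplier, cap_secs)

# With the example configuration above (30s initial, x5, capped at 12h):
interval = 30.0
for _ in range(6):
    print(interval)  # 30, 150, 750, 3750, 18750, then clamped to 43200
    interval = next_retry_interval(interval, 5, 12 * 3600)
```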
||||
---
|
||||
## Caching
|
||||
@@ -2807,6 +2848,20 @@ Example configuration:
|
||||
```yaml
|
||||
track_appservice_user_ips: true
|
||||
```
|
||||
---
|
||||
### `use_appservice_legacy_authorization`
|
||||
|
||||
Whether to send the application service access tokens via the `access_token` query parameter
|
||||
per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
|
||||
access tokens via a query parameter.
|
||||
|
||||
**Enabling this option is considered insecure and is not recommended.**
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
use_appservice_legacy_authorization: true
|
||||
```
|
||||
|
||||
---
|
||||
### `macaroon_secret_key`
|
||||
|
||||
@@ -2970,6 +3025,16 @@ enable SAML login. You can either put your entire pysaml config inline using the
|
||||
option, or you can specify a path to a pysaml config file with the sub-option `config_path`.
|
||||
This setting has the following sub-options:
|
||||
|
||||
* `idp_name`: A user-facing name for this identity provider, which is used to
|
||||
offer the user a choice of login mechanisms.
|
||||
* `idp_icon`: An optional icon for this identity provider, which is presented
|
||||
by clients and Synapse's own IdP picker page. If given, must be an
|
||||
MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
|
||||
obtain such an MXC URI is to upload an image to an (unencrypted) room
|
||||
and then copy the "url" from the source of the event.)
|
||||
* `idp_brand`: An optional brand for this identity provider, allowing clients
|
||||
to style the login flow according to the identity provider in question.
|
||||
See the [spec](https://spec.matrix.org/latest/) for possible options here.
|
||||
* `sp_config`: the configuration for the pysaml2 Service Provider. See pysaml2 docs for format of config.
|
||||
Default values will be used for the `entityid` and `service` settings,
|
||||
so it is not normally necessary to specify them unless you need to
|
||||
@@ -3121,7 +3186,7 @@ Options for each entry include:
|
||||
|
||||
* `idp_icon`: An optional icon for this identity provider, which is presented
|
||||
by clients and Synapse's own IdP picker page. If given, must be an
|
||||
MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
|
||||
MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
|
||||
obtain such an MXC URI is to upload an image to an (unencrypted) room
|
||||
and then copy the "url" from the source of the event.)
|
||||
|
||||
@@ -3139,6 +3204,14 @@ Options for each entry include:
|
||||
|
||||
* `client_secret`: oauth2 client secret to use. May be omitted if
|
||||
`client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
|
||||
Must be omitted if `client_secret_path` is specified.
|
||||
|
||||
* `client_secret_path`: path to the oauth2 client secret to use. This avoids
|
||||
the need to store the secret in the config file itself.
|
||||
Mutually exclusive with `client_secret`. Can be omitted if
|
||||
`client_secret_jwt_key` is specified.
|
||||
|
||||
*Added in Synapse 1.91.0.*
|
||||
|
||||
* `client_secret_jwt_key`: Alternative to client_secret: details of a key used
|
||||
to create a JSON Web Token to be used as an OAuth2 client secret. If
|
||||
@@ -3336,7 +3409,18 @@ Enable Central Authentication Service (CAS) for registration and login.
|
||||
Has the following sub-options:
|
||||
* `enabled`: Set this to true to enable authorization against a CAS server.
|
||||
Defaults to false.
|
||||
* `idp_name`: A user-facing name for this identity provider, which is used to
|
||||
offer the user a choice of login mechanisms.
|
||||
* `idp_icon`: An optional icon for this identity provider, which is presented
|
||||
by clients and Synapse's own IdP picker page. If given, must be an
|
||||
MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
|
||||
obtain such an MXC URI is to upload an image to an (unencrypted) room
|
||||
and then copy the "url" from the source of the event.)
|
||||
* `idp_brand`: An optional brand for this identity provider, allowing clients
|
||||
to style the login flow according to the identity provider in question.
|
||||
See the [spec](https://spec.matrix.org/latest/) for possible options here.
|
||||
* `server_url`: The URL of the CAS authorization endpoint.
|
||||
* `protocol_version`: The CAS protocol version, defaults to none (version 3 is required if you want to use "required_attributes").
|
||||
* `displayname_attribute`: The attribute of the CAS response to use as the display name.
|
||||
If no name is given here, no displayname will be set.
|
||||
* `required_attributes`: It is possible to configure Synapse to only allow logins if CAS attributes
|
||||
@@ -3350,6 +3434,7 @@ Example configuration:
|
||||
cas_config:
|
||||
enabled: true
|
||||
server_url: "https://cas-server.com"
|
||||
protocol_version: 3
|
||||
displayname_attribute: name
|
||||
required_attributes:
|
||||
userGroup: "staff"
|
||||
@@ -3576,6 +3661,7 @@ This option has the following sub-options:
|
||||
* `prefer_local_users`: Defines whether to prefer local users in search query results.
|
||||
If set to true, local users are more likely to appear above remote users when searching the
|
||||
user directory. Defaults to false.
|
||||
* `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
@@ -3583,6 +3669,7 @@ user_directory:
|
||||
enabled: false
|
||||
search_all_users: true
|
||||
prefer_local_users: true
|
||||
show_locked_users: true
|
||||
```
|
||||
---
|
||||
### `user_consent`
|
||||
@@ -3780,6 +3867,19 @@ Example configuration:
|
||||
```yaml
|
||||
forget_rooms_on_leave: false
|
||||
```
|
||||
---
|
||||
### `exclude_rooms_from_sync`
|
||||
A list of rooms to exclude from sync responses. This is useful for server
|
||||
administrators wishing to group users into a room without these users being able
|
||||
to see it from their client.
|
||||
|
||||
By default, no room is excluded.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
exclude_rooms_from_sync:
|
||||
- !foo:example.com
|
||||
```
|
||||
|
||||
---
|
||||
## Opentracing
|
||||
@@ -3930,13 +4030,14 @@ federation_sender_instances:
|
||||
---
|
||||
### `instance_map`
|
||||
|
||||
When using workers this should be a map from [`worker_name`](#worker_name) to the
|
||||
HTTP replication listener of the worker, if configured, and to the main process.
|
||||
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
|
||||
a HTTP replication listener, and that listener should be included in the `instance_map`.
|
||||
The main process also needs an entry on the `instance_map`, and it should be listed under
|
||||
`main` **if even one other worker exists**. Ensure the port matches with what is declared
|
||||
inside the `listener` block for a `replication` listener.
|
||||
When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
|
||||
replication listener of the worker, if configured, and to the main process. Each worker
|
||||
declared under [`stream_writers`](../../workers.md#stream-writers) and
|
||||
[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP
|
||||
replication listener, and that listener should be included in the `instance_map`. The
|
||||
main process also needs an entry on the `instance_map`, and it should be listed under
|
||||
`main` **if even one other worker exists**. Ensure the port matches with what is
|
||||
declared inside the `listener` block for a `replication` listener.
|
||||
|
||||
|
||||
Example configuration:
|
||||
@@ -3949,6 +4050,14 @@ instance_map:
|
||||
host: localhost
|
||||
port: 8034
|
||||
```
|
||||
Example configuration (#2, for UNIX sockets):
|
||||
```yaml
|
||||
instance_map:
|
||||
main:
|
||||
path: /var/run/synapse/main_replication.sock
|
||||
worker1:
|
||||
path: /var/run/synapse/worker1_replication.sock
|
||||
```
|
||||
---
|
||||
### `stream_writers`
|
||||
|
||||
@@ -3966,6 +4075,24 @@ stream_writers:
|
||||
typing: worker1
|
||||
```
|
||||
---
|
||||
### `outbound_federation_restricted_to`
|
||||
|
||||
When using workers, you can restrict outbound federation traffic to only go through a
|
||||
specific subset of workers. Any worker specified here must also be in the
|
||||
[`instance_map`](#instance_map).
|
||||
[`worker_replication_secret`](#worker_replication_secret) must also be configured to
|
||||
authorize inter-worker communication.
|
||||
|
||||
```yaml
|
||||
outbound_federation_restricted_to:
|
||||
- federation_sender1
|
||||
- federation_sender2
|
||||
```
|
||||
|
||||
Also see the [worker
|
||||
documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
|
||||
for more info.
|
||||
---
|
||||
### `run_background_tasks_on`
|
||||
|
||||
The [worker](../../workers.md#background-tasks) that is used to run
|
||||
@@ -4090,51 +4217,6 @@ Example configuration:
|
||||
worker_name: generic_worker1
|
||||
```
|
||||
---
|
||||
### `worker_replication_host`
|
||||
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
|
||||
|
||||
The HTTP replication endpoint that it should talk to on the main Synapse process.
|
||||
The main Synapse process defines this with a `replication` resource in
|
||||
[`listeners` option](#listeners).
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
worker_replication_host: 127.0.0.1
|
||||
```
|
||||
---
|
||||
### `worker_replication_http_port`
|
||||
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
|
||||
|
||||
The HTTP replication port that it should talk to on the main Synapse process.
|
||||
The main Synapse process defines this with a `replication` resource in
|
||||
[`listeners` option](#listeners).
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
worker_replication_http_port: 9093
|
||||
```
|
||||
---
|
||||
### `worker_replication_http_tls`
|
||||
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
|
||||
|
||||
Whether TLS should be used for talking to the HTTP replication port on the main
|
||||
Synapse process.
|
||||
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
|
||||
has the `replication` resource enabled.
|
||||
|
||||
**Please note:** by default, it is not safe to expose replication ports to the
|
||||
public Internet, even with TLS enabled.
|
||||
See [`worker_replication_secret`](#worker_replication_secret).
|
||||
|
||||
Defaults to `false`.
|
||||
|
||||
*Added in Synapse 1.72.0.*
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
worker_replication_http_tls: true
|
||||
```
|
||||
---
|
||||
### `worker_listeners`
|
||||
|
||||
A worker can handle HTTP requests. To do so, a `worker_listeners` option
|
||||
@@ -4153,6 +4235,18 @@ worker_listeners:
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
```
|
||||
Example configuration (#2, using UNIX sockets with a `replication` listener):
|
||||
```yaml
|
||||
worker_listeners:
|
||||
- type: http
|
||||
path: /var/run/synapse/worker_public.sock
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
- type: http
|
||||
path: /var/run/synapse/worker_replication.sock
|
||||
resources:
|
||||
- names: [replication]
|
||||
```
|
||||
---
|
||||
### `worker_manhole`
|
||||
|
||||
|
||||
@@ -95,9 +95,12 @@ for the main process
|
||||
* Secondly, you need to enable
|
||||
[redis-based replication](usage/configuration/config_documentation.md#redis)
|
||||
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
|
||||
with the `main` process defined, as well as the relevant connection information from
|
||||
its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
|
||||
is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
|
||||
with the `main` process defined, as well as the relevant connection information from
|
||||
its HTTP `replication` listener (defined in step 1 above).
|
||||
* Note that the `host` defined is the address the worker needs to look for the `main`
|
||||
process at, not necessarily the same address that is bound to.
|
||||
* If you are using Unix sockets for the `replication` resource, make sure to
|
||||
use a `path` to the socket file instead of a `port`.
|
||||
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
|
||||
can be used to authenticate HTTP traffic between workers. For example:
|
||||
|
||||
@@ -145,9 +148,6 @@ In the config file for each worker, you must specify:
|
||||
with an `http` listener.
|
||||
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
|
||||
the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
|
||||
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
|
||||
([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
|
||||
[`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`.
|
||||
|
||||
For example:
|
||||
|
||||
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
|
||||
|
||||
You can start the main Synapse process with Poetry by running the following command:
|
||||
```console
|
||||
poetry run synapse_homeserver -c [your homeserver.yaml]
|
||||
poetry run synapse_homeserver --config-file [your homeserver.yaml]
|
||||
```
|
||||
For worker setups, you can run the following command
|
||||
```console
|
||||
poetry run synapse_worker -c [your worker.yaml]
|
||||
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
|
||||
```
|
||||
## Available worker applications
|
||||
|
||||
@@ -531,6 +531,30 @@ the stream writer for the `presence` stream:
|
||||
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
|
||||
|
||||
#### Restrict outbound federation traffic to a specific set of workers
|
||||
|
||||
The
|
||||
[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
|
||||
configuration is useful to make sure outbound federation traffic only goes through a
|
||||
specified subset of workers. This allows you to set stricter access controls (like a
|
||||
firewall) for all workers and only allow the `federation_sender` workers to contact the
|
||||
outside world.
|
||||
|
||||
```yaml
|
||||
instance_map:
|
||||
main:
|
||||
host: localhost
|
||||
port: 8030
|
||||
federation_sender1:
|
||||
host: localhost
|
||||
port: 8034
|
||||
|
||||
outbound_federation_restricted_to:
|
||||
- federation_sender1
|
||||
|
||||
worker_replication_secret: "secret_secret"
|
||||
```
|
||||
|
||||
#### Background tasks
|
||||
|
||||
There is also support for moving background tasks to a separate
|
||||
|
||||
148
flake.lock
generated
@@ -8,41 +8,20 @@
|
||||
"pre-commit-hooks": "pre-commit-hooks"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1683102061,
|
||||
"narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
|
||||
"lastModified": 1688058187,
|
||||
"narHash": "sha256-ipDcc7qrucpJ0+0eYNlwnE+ISTcq4m03qW+CWUshRXI=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
|
||||
"rev": "c8778e3dc30eb9043e218aaa3861d42d4992de77",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "main",
|
||||
"ref": "v0.6.3",
|
||||
"repo": "devenv",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1682490133,
|
||||
"narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
@@ -60,12 +39,33 @@
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1667395993,
|
||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||
"lastModified": 1685518550,
|
||||
"narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||
"rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681202837,
|
||||
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -170,27 +170,27 @@
|
||||
},
|
||||
"nixpkgs-stable": {
|
||||
"locked": {
|
||||
"lastModified": 1673800717,
|
||||
"narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
|
||||
"lastModified": 1685801374,
|
||||
"narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
|
||||
"rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-22.11",
|
||||
"ref": "nixos-23.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1682519441,
|
||||
"narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
|
||||
"lastModified": 1690535733,
|
||||
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
|
||||
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -200,6 +200,22 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681358109,
|
||||
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"pre-commit-hooks": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
@@ -215,11 +231,11 @@
|
||||
"nixpkgs-stable": "nixpkgs-stable"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1678376203,
|
||||
"narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
|
||||
"lastModified": 1688056373,
|
||||
"narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
|
||||
"owner": "cachix",
|
||||
"repo": "pre-commit-hooks.nix",
|
||||
"rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
|
||||
"rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -231,25 +247,27 @@
|
||||
"root": {
|
||||
"inputs": {
|
||||
"devenv": "devenv",
|
||||
"fenix": "fenix",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"systems": "systems"
|
||||
"rust-overlay": "rust-overlay",
|
||||
"systems": "systems_3"
|
||||
}
|
||||
},
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nixpkgs": "nixpkgs_3"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1682426789,
|
||||
"narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
|
||||
"lastModified": 1690510705,
|
||||
"narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "851ae4c128905a62834d53ce7704ebc1ba481bea",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rust-lang",
|
||||
"ref": "nightly",
|
||||
"repo": "rust-analyzer",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
@@ -267,6 +285,36 @@
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
|
||||
41
flake.nix
@@ -39,27 +39,27 @@
|
||||
|
||||
{
|
||||
inputs = {
|
||||
# Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
|
||||
# does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
|
||||
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
|
||||
# available versions of packages.
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/master";
|
||||
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
|
||||
systems.url = "github:nix-systems/default";
|
||||
# A development environment manager built on Nix. See https://devenv.sh.
|
||||
devenv.url = "github:cachix/devenv/main";
|
||||
# Rust toolchains and rust-analyzer nightly.
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
devenv.url = "github:cachix/devenv/v0.6.3";
|
||||
# Rust toolchain.
|
||||
rust-overlay.url = "github:oxalica/rust-overlay";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
|
||||
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
|
||||
let
|
||||
forEachSystem = nixpkgs.lib.genAttrs (import systems);
|
||||
in {
|
||||
devShells = forEachSystem (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
overlays = [ (import rust-overlay) ];
|
||||
pkgs = import nixpkgs {
|
||||
inherit system overlays;
|
||||
};
|
||||
in {
|
||||
# Everything is configured via devenv - a Nix module for creating declarative
|
||||
# developer environments. See https://devenv.sh/reference/options/ for a list
|
||||
@@ -76,6 +76,20 @@
|
||||
# Configure packages to install.
|
||||
# Search for package names at https://search.nixos.org/packages?channel=unstable
|
||||
packages = with pkgs; [
|
||||
# The rust toolchain and related tools.
|
||||
# This will install the "default" profile of rust components.
|
||||
# https://rust-lang.github.io/rustup/concepts/profiles.html
|
||||
#
|
||||
# NOTE: We currently need to set the Rust version unnecessarily high
|
||||
# in order to work around https://github.com/matrix-org/synapse/issues/15939
|
||||
(rust-bin.stable."1.70.0".default.override {
|
||||
# Additionally install the "rust-src" extension to allow diving into the
|
||||
# Rust source code in an IDE (rust-analyzer will also make use of it).
|
||||
extensions = [ "rust-src" ];
|
||||
})
|
||||
# The rust-analyzer language server implementation.
|
||||
rust-analyzer
|
||||
|
||||
# Native dependencies for running Synapse.
|
||||
icu
|
||||
libffi
|
||||
@@ -124,12 +138,11 @@
|
||||
# Install dependencies for the additional programming languages
|
||||
# involved with Synapse development.
|
||||
#
|
||||
# * Rust is used for developing and running Synapse.
|
||||
# * Golang is needed to run the Complement test suite.
|
||||
# * Perl is needed to run the SyTest test suite.
|
||||
# * Rust is used for developing and running Synapse.
|
||||
# It is installed manually with `packages` above.
|
||||
languages.go.enable = true;
|
||||
languages.rust.enable = true;
|
||||
languages.rust.version = "stable";
|
||||
languages.perl.enable = true;
|
||||
|
||||
# Postgres is needed to run Synapse with postgres support and
|
||||
@@ -178,7 +191,7 @@
|
||||
EOF
|
||||
'';
|
||||
# Start synapse when `devenv up` is run.
|
||||
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d";
|
||||
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";
|
||||
|
||||
# Define the perl modules we require to run SyTest.
|
||||
#
|
||||
|
||||
16
mypy.ini
@@ -45,6 +45,13 @@ warn_unused_ignores = False
|
||||
disallow_untyped_defs = False
|
||||
disallow_incomplete_defs = False
|
||||
|
||||
[mypy-synapse.util.manhole]
|
||||
# This module imports something from Twisted which has a bad annotation in Twisted trunk,
|
||||
# but is unannotated in Twisted's latest release. We want to type-ignore the problem
|
||||
# in the twisted trunk job, even though it has no effect on normal mypy runs.
|
||||
warn_unused_ignores = False
|
||||
|
||||
|
||||
;; Dependencies without annotations
|
||||
;; Before ignoring a module, check to see if type stubs are available.
|
||||
;; The `typeshed` project maintains stubs here:
|
||||
@@ -80,18 +87,9 @@ ignore_missing_imports = True
|
||||
[mypy-saml2.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-service_identity.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-srvlookup.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
# https://github.com/twisted/treq/pull/366
|
||||
[mypy-treq.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-incremental.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-setuptools_rust.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
1312
poetry.lock
generated
File diff suppressed because it is too large
@@ -35,7 +35,7 @@
|
||||
showcontent = true
|
||||
|
||||
[tool.black]
|
||||
target-version = ['py37', 'py38', 'py39', 'py310']
|
||||
target-version = ['py38', 'py39', 'py310', 'py311']
|
||||
# black ignores everything in .gitignore by default, see
|
||||
# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
|
||||
# Use `extend-exclude` if you want to exclude something in addition to this.
|
||||
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
|
||||
|
||||
[tool.poetry]
|
||||
name = "matrix-synapse"
|
||||
version = "1.86.0"
|
||||
version = "1.92.0"
|
||||
description = "Homeserver for the Matrix decentralised comms protocol"
|
||||
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
|
||||
license = "Apache-2.0"
|
||||
@@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
|
||||
update_synapse_database = "synapse._scripts.update_synapse_database:main"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.7.1"
|
||||
python = "^3.8.0"
|
||||
|
||||
# Mandatory Dependencies
|
||||
# ----------------------
|
||||
@@ -203,11 +203,9 @@ ijson = ">=3.1.4"
|
||||
matrix-common = "^1.3.0"
|
||||
# We need packaging.requirements.Requirement, added in 16.1.
|
||||
packaging = ">=16.1"
|
||||
# At the time of writing, we only use functions from the version of `importlib.metadata`
|
||||
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
|
||||
importlib_metadata = { version = ">=1.4", python = "<3.8" }
|
||||
# This is the most recent version of Pydantic available on common distros.
|
||||
pydantic = ">=1.7.4"
|
||||
# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
|
||||
pydantic = "^1.7.4"
|
||||
|
||||
# This is for building the rust components during "poetry install", which
|
||||
# currently ignores the `build-system.requires` directive (c.f.
|
||||
@@ -308,10 +306,13 @@ all = [
|
||||
]
|
||||
|
||||
[tool.poetry.dev-dependencies]
|
||||
# We pin black so that our tests don't start failing on new releases.
|
||||
# We pin development dependencies in poetry.lock so that our tests don't start
|
||||
# failing on new releases. Keeping lower bounds loose here means that dependabot
|
||||
# can bump versions without having to update the content-hash in the lockfile.
|
||||
# This helps prevent merge conflicts when running a batch of dependabot updates.
|
||||
isort = ">=5.10.1"
|
||||
black = ">=22.3.0"
|
||||
ruff = "0.0.275"
|
||||
black = ">=22.7.0"
|
||||
ruff = "0.0.286"
|
||||
|
||||
# Typechecking
|
||||
lxml-stubs = ">=0.4.0"
|
||||
@@ -369,13 +370,21 @@ furo = ">=2022.12.7,<2024.0.0"
|
||||
# system changes.
|
||||
# We are happy to raise these upper bounds upon request,
|
||||
# provided we check that it's safe to do so (i.e. that CI passes).
|
||||
requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
|
||||
requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.6.0"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
|
||||
|
||||
[tool.cibuildwheel]
|
||||
# Skip unsupported platforms (by us or by Rust).
|
||||
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
|
||||
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
|
||||
# We skip:
|
||||
# - CPython 3.6 and 3.7: EOLed
|
||||
# - PyPy 3.7: we only support Python 3.8+
|
||||
# - musllinux i686: excluded to reduce number of wheels we build.
|
||||
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
|
||||
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
|
||||
# c.f. https://github.com/matrix-org/synapse/pull/14259
|
||||
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
|
||||
|
||||
# We need a rust compiler
|
||||
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
|
||||
|
||||
@@ -13,6 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
#![feature(test)]
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
use synapse::push::{
|
||||
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
|
||||
PushRules, SimpleJsonValue,
|
||||
@@ -26,15 +29,15 @@ fn bench_match_exact(b: &mut Bencher) {
|
||||
let flattened_keys = [
|
||||
(
|
||||
"type".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
|
||||
),
|
||||
(
|
||||
"room_id".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
|
||||
),
|
||||
(
|
||||
"content.body".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
@@ -71,15 +74,15 @@ fn bench_match_word(b: &mut Bencher) {
|
||||
let flattened_keys = [
|
||||
(
|
||||
"type".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
|
||||
),
|
||||
(
|
||||
"room_id".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
|
||||
),
|
||||
(
|
||||
"content.body".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
@@ -116,15 +119,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
|
||||
let flattened_keys = [
|
||||
(
|
||||
"type".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
|
||||
),
|
||||
(
|
||||
"room_id".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
|
||||
),
|
||||
(
|
||||
"content.body".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
@@ -161,15 +164,15 @@ fn bench_eval_message(b: &mut Bencher) {
|
||||
let flattened_keys = [
|
||||
(
|
||||
"type".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
|
||||
),
|
||||
(
|
||||
"room_id".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
|
||||
),
|
||||
(
|
||||
"content.body".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
|
||||
),
|
||||
]
|
||||
.into_iter()
|
||||
@@ -194,7 +197,6 @@ fn bench_eval_message(b: &mut Bencher) {
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
);
|
||||
|
||||
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
|
||||
|
||||
@@ -63,22 +63,6 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
|
||||
}];
|
||||
|
||||
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
|
||||
// We don't want to notify on edits. Not only can this be confusing in real
|
||||
// time (2 notifications, one message) but it's especially confusing
|
||||
// if a bridge needs to edit a previously backfilled message.
|
||||
PushRule {
|
||||
rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
|
||||
priority_class: 5,
|
||||
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
|
||||
EventMatchCondition {
|
||||
key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
|
||||
pattern: Cow::Borrowed("m.replace"),
|
||||
},
|
||||
))]),
|
||||
actions: Cow::Borrowed(&[]),
|
||||
default: true,
|
||||
default_enabled: true,
|
||||
},
|
||||
PushRule {
|
||||
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
|
||||
priority_class: 5,
|
||||
@@ -146,7 +130,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
|
||||
priority_class: 5,
|
||||
conditions: Cow::Borrowed(&[Condition::Known(
|
||||
KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
|
||||
key: Cow::Borrowed("content.m\\.mentions.user_ids"),
|
||||
key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
|
||||
value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
|
||||
}),
|
||||
)]),
|
||||
@@ -167,8 +151,8 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
|
||||
priority_class: 5,
|
||||
conditions: Cow::Borrowed(&[
|
||||
Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
|
||||
key: Cow::Borrowed("content.m\\.mentions.room"),
|
||||
value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
|
||||
key: Cow::Borrowed(r"content.m\.mentions.room"),
|
||||
value: Cow::Owned(SimpleJsonValue::Bool(true)),
|
||||
})),
|
||||
Condition::Known(KnownCondition::SenderNotificationPermission {
|
||||
key: Cow::Borrowed("room"),
|
||||
@@ -241,6 +225,21 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
|
||||
default: true,
|
||||
default_enabled: true,
|
||||
},
|
||||
// We don't want to notify on edits *unless* the edit directly mentions a
|
||||
// user, which is handled above.
|
||||
PushRule {
|
||||
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_edits"),
|
||||
priority_class: 5,
|
||||
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
|
||||
EventPropertyIsCondition {
|
||||
key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
|
||||
value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
|
||||
},
|
||||
))]),
|
||||
actions: Cow::Borrowed(&[]),
|
||||
default: true,
|
||||
default_enabled: true,
|
||||
},
|
||||
PushRule {
|
||||
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
|
||||
priority_class: 5,
|
||||
|
||||
@@ -117,7 +117,7 @@ impl PushRuleEvaluator {
|
||||
msc3931_enabled: bool,
|
||||
) -> Result<Self, Error> {
|
||||
let body = match flattened_keys.get("content.body") {
|
||||
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
|
||||
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
|
||||
_ => String::new(),
|
||||
};
|
||||
|
||||
@@ -313,13 +313,15 @@ impl PushRuleEvaluator {
|
||||
};
|
||||
|
||||
let pattern = match &*exact_event_match.value_type {
|
||||
EventMatchPatternType::UserId => user_id,
|
||||
EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
|
||||
EventMatchPatternType::UserId => user_id.to_owned(),
|
||||
EventMatchPatternType::UserLocalpart => {
|
||||
get_localpart_from_id(user_id)?.to_owned()
|
||||
}
|
||||
};
|
||||
|
||||
self.match_event_property_contains(
|
||||
exact_event_match.key.clone(),
|
||||
Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
|
||||
Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
|
||||
)?
|
||||
}
|
||||
KnownCondition::ContainsDisplayName => {
|
||||
@@ -494,7 +496,7 @@ fn push_rule_evaluator() {
|
||||
let mut flattened_keys = BTreeMap::new();
|
||||
flattened_keys.insert(
|
||||
"content.body".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
|
||||
);
|
||||
let evaluator = PushRuleEvaluator::py_new(
|
||||
flattened_keys,
|
||||
@@ -522,7 +524,7 @@ fn test_requires_room_version_supports_condition() {
|
||||
let mut flattened_keys = BTreeMap::new();
|
||||
flattened_keys.insert(
|
||||
"content.body".to_string(),
|
||||
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
|
||||
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
|
||||
);
|
||||
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
|
||||
let evaluator = PushRuleEvaluator::py_new(
|
||||
@@ -562,7 +564,7 @@ fn test_requires_room_version_supports_condition() {
|
||||
};
|
||||
let rules = PushRules::new(vec![custom_rule]);
|
||||
result = evaluator.run(
|
||||
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
|
||||
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
@@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
|
||||
#[serde(untagged)]
|
||||
pub enum SimpleJsonValue {
|
||||
Str(String),
|
||||
Str(Cow<'static, str>),
|
||||
Int(i64),
|
||||
Bool(bool),
|
||||
Null,
|
||||
@@ -265,7 +265,7 @@ pub enum SimpleJsonValue {
|
||||
impl<'source> FromPyObject<'source> for SimpleJsonValue {
|
||||
fn extract(ob: &'source PyAny) -> PyResult<Self> {
|
||||
if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
|
||||
Ok(SimpleJsonValue::Str(s.to_string()))
|
||||
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
|
||||
// A bool *is* an int, ensure we try bool first.
|
||||
} else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
|
||||
Ok(SimpleJsonValue::Bool(b.extract()?))
|
||||
@@ -527,7 +527,6 @@ pub struct FilteredPushRules {
|
||||
msc1767_enabled: bool,
|
||||
msc3381_polls_enabled: bool,
|
||||
msc3664_enabled: bool,
|
||||
msc3958_suppress_edits_enabled: bool,
|
||||
}
|
||||
|
||||
#[pymethods]
|
||||
@@ -539,7 +538,6 @@ impl FilteredPushRules {
|
||||
msc1767_enabled: bool,
|
||||
msc3381_polls_enabled: bool,
|
||||
msc3664_enabled: bool,
|
||||
msc3958_suppress_edits_enabled: bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
push_rules,
|
||||
@@ -547,7 +545,6 @@ impl FilteredPushRules {
|
||||
msc1767_enabled,
|
||||
msc3381_polls_enabled,
|
||||
msc3664_enabled,
|
||||
msc3958_suppress_edits_enabled,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -584,12 +581,6 @@ impl FilteredPushRules {
|
||||
return false;
|
||||
}
|
||||
|
||||
if !self.msc3958_suppress_edits_enabled
|
||||
&& rule.rule_id == "global/override/.com.beeper.suppress_edits"
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
})
|
||||
.map(|r| {
|
||||
|
||||
@@ -22,15 +22,19 @@ from typing import Collection, Optional, Sequence, Set
|
||||
|
||||
# These are expanded inside the dockerfile to be a fully qualified image name.
|
||||
# e.g. docker.io/library/debian:bullseye
|
||||
#
|
||||
# If an EOL is forced by a Python version and we're dropping support for it, make sure
|
||||
# to remove references to the distribution across Synapse (search for "bullseye" for
|
||||
# example)
|
||||
DISTS = (
|
||||
"debian:buster", # oldstable: EOL 2022-08
|
||||
"debian:bullseye",
|
||||
"debian:bookworm",
|
||||
"debian:sid",
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
|
||||
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
|
||||
"ubuntu:lunar", # 23.04 (EOL 2024-01)
|
||||
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
|
||||
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
|
||||
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
|
||||
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
|
||||
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
|
||||
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
|
||||
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
|
||||
"debian:trixie", # (EOL not specified yet)
|
||||
)
|
||||
|
||||
DESC = """\
|
||||
@@ -43,7 +47,7 @@ can be passed on the commandline for debugging.
|
||||
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
||||
|
||||
|
||||
class Builder(object):
|
||||
class Builder:
|
||||
def __init__(
|
||||
self,
|
||||
redirect_stdout: bool = False,
|
||||
|
||||
@@ -43,7 +43,7 @@ def main(force_colors: bool) -> None:
|
||||
diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
|
||||
|
||||
# Get the schema version of the local file to check against current schema on develop
|
||||
with open("synapse/storage/schema/__init__.py", "r") as file:
|
||||
with open("synapse/storage/schema/__init__.py") as file:
|
||||
local_schema = file.read()
|
||||
new_locals: Dict[str, Any] = {}
|
||||
exec(local_schema, new_locals)
|
||||
|
||||
@@ -214,7 +214,7 @@ fi
|
||||
|
||||
extra_test_args=()
|
||||
|
||||
test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
|
||||
test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"
|
||||
|
||||
# All environment variables starting with PASS_ will be shared.
|
||||
# (The prefix is stripped off before reaching the container.)
|
||||
@@ -253,6 +253,10 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
|
||||
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
|
||||
fi
|
||||
|
||||
if [[ -n "$UNIX_SOCKETS" ]]; then
|
||||
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
|
||||
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
|
||||
fi
|
||||
|
||||
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
|
||||
# Set the log level to what is desired
|
||||
|
||||
@@ -247,7 +247,7 @@ def main() -> None:
|
||||
|
||||
|
||||
def read_args_from_config(args: argparse.Namespace) -> None:
|
||||
with open(args.config, "r") as fh:
|
||||
with open(args.config) as fh:
|
||||
config = yaml.safe_load(fh)
|
||||
|
||||
if not args.server_name:
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -245,11 +244,17 @@ def _prepare() -> None:
|
||||
else:
|
||||
debian_version = new_version
|
||||
|
||||
run_until_successful(
|
||||
f'dch -M -v {debian_version} "New Synapse release {new_version}."',
|
||||
shell=True,
|
||||
)
|
||||
run_until_successful('dch -M -r -D stable ""', shell=True)
|
||||
if sys.platform == "darwin":
|
||||
run_until_successful(
|
||||
f"docker run --rm -v .:/synapse ubuntu:latest /synapse/scripts-dev/docker_update_debian_changelog.sh {new_version}",
|
||||
shell=True,
|
||||
)
|
||||
else:
|
||||
run_until_successful(
|
||||
f'dch -M -v {debian_version} "New Synapse release {new_version}."',
|
||||
shell=True,
|
||||
)
|
||||
run_until_successful('dch -M -r -D stable ""', shell=True)
|
||||
|
||||
# Show the user the changes and ask if they want to edit the change log.
|
||||
synapse_repo.git.add("-u")
|
||||
@@ -567,19 +572,27 @@ def _notify(message: str) -> None:
|
||||
# for this.
|
||||
click.echo(f"\a{message}")
|
||||
|
||||
app_name = "Synapse Release Script"
|
||||
|
||||
# Try and run notify-send, but don't raise an Exception if this fails
|
||||
# (This is best-effort)
|
||||
# TODO Support other platforms?
|
||||
subprocess.run(
|
||||
[
|
||||
"notify-send",
|
||||
"--app-name",
|
||||
"Synapse Release Script",
|
||||
"--expire-time",
|
||||
"3600000",
|
||||
message,
|
||||
]
|
||||
)
|
||||
if sys.platform == "darwin":
|
||||
# See https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide/reference/ASLR_cmds.html#//apple_ref/doc/uid/TP40000983-CH216-SW224
|
||||
subprocess.run(
|
||||
f"""osascript -e 'display notification "{message}" with title "{app_name}"'""",
|
||||
shell=True,
|
||||
)
|
||||
else:
|
||||
subprocess.run(
|
||||
[
|
||||
"notify-send",
|
||||
"--app-name",
|
||||
app_name,
|
||||
"--expire-time",
|
||||
"3600000",
|
||||
message,
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@cli.command()
|
||||
|
||||
@@ -145,7 +145,7 @@ Example usage:
|
||||
|
||||
|
||||
def read_args_from_config(args: argparse.Namespace) -> None:
|
||||
with open(args.config, "r") as fh:
|
||||
with open(args.config) as fh:
|
||||
config = yaml.safe_load(fh)
|
||||
if not args.server_name:
|
||||
args.server_name = config["server_name"]
|
||||
|
||||
@@ -46,7 +46,6 @@ class FilteredPushRules:
|
||||
msc1767_enabled: bool,
|
||||
msc3381_polls_enabled: bool,
|
||||
msc3664_enabled: bool,
|
||||
msc3958_suppress_edits_enabled: bool,
|
||||
): ...
|
||||
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
|
||||
|
||||
|
||||
@@ -21,12 +21,21 @@ import os
|
||||
import sys
|
||||
from typing import Any, Dict
|
||||
|
||||
from PIL import ImageFile
|
||||
|
||||
from synapse.util.rust import check_rust_lib_up_to_date
|
||||
from synapse.util.stringutils import strtobool
|
||||
|
||||
# Allow truncated JPEG images to be thumbnailed.
|
||||
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
||||
|
||||
# Check that we're not running on an unsupported Python version.
|
||||
if sys.version_info < (3, 7):
|
||||
print("Synapse requires Python 3.7 or above.")
|
||||
#
|
||||
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
|
||||
# if-statement completely.
|
||||
py_version = sys.version_info
|
||||
if py_version < (3, 8):
|
||||
print("Synapse requires Python 3.8 or above.")
|
||||
sys.exit(1)
|
||||
|
||||
# Allow using the asyncio reactor via env var.
|
||||
@@ -78,7 +87,7 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
import synapse.util
|
||||
import synapse.util # noqa: E402
|
||||
|
||||
__version__ = synapse.util.SYNAPSE_VERSION
|
||||
|
||||
|
||||
@@ -61,6 +61,7 @@ from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpda
|
||||
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
|
||||
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
|
||||
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
|
||||
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
|
||||
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
|
||||
from synapse.storage.databases.main.events_bg_updates import (
|
||||
EventsBackgroundUpdatesStore,
|
||||
@@ -122,7 +123,7 @@ BOOLEAN_COLUMNS = {
|
||||
"redactions": ["have_censored"],
|
||||
"room_stats_state": ["is_federatable"],
|
||||
"rooms": ["is_public", "has_auth_chain_index"],
|
||||
"users": ["shadow_banned", "approved"],
|
||||
"users": ["shadow_banned", "approved", "locked"],
|
||||
"un_partial_stated_event_stream": ["rejection_status_changed"],
|
||||
"users_who_share_rooms": ["share_private"],
|
||||
"per_user_experimental_features": ["enabled"],
|
||||
@@ -196,6 +197,11 @@ IGNORED_TABLES = {
|
||||
"ui_auth_sessions",
|
||||
"ui_auth_sessions_credentials",
|
||||
"ui_auth_sessions_ips",
|
||||
# Ignore the worker locks table, as a) there shouldn't be any acquired locks
|
||||
# after porting, and b) the circular foreign key constraints make it hard to
|
||||
# port.
|
||||
"worker_read_write_locks_mode",
|
||||
"worker_read_write_locks",
|
||||
}
|
||||
|
||||
|
||||
@@ -239,6 +245,7 @@ class Store(
|
||||
PresenceBackgroundUpdateStore,
|
||||
ReceiptsBackgroundUpdateStore,
|
||||
RelationsWorkerStore,
|
||||
EventFederationWorkerStore,
|
||||
):
|
||||
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
|
||||
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
|
||||
@@ -475,7 +482,10 @@ class Porter:
|
||||
do_backward[0] = False
|
||||
|
||||
if forward_rows or backward_rows:
|
||||
headers = [column[0] for column in txn.description]
|
||||
assert txn.description is not None
|
||||
headers: Optional[List[str]] = [
|
||||
column[0] for column in txn.description
|
||||
]
|
||||
else:
|
||||
headers = None
|
||||
|
||||
@@ -537,6 +547,7 @@ class Porter:
|
||||
def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
|
||||
txn.execute(select, (forward_chunk, self.batch_size))
|
||||
rows = txn.fetchall()
|
||||
assert txn.description is not None
|
||||
headers = [column[0] for column in txn.description]
|
||||
|
||||
return headers, rows
|
||||
@@ -754,7 +765,7 @@ class Porter:
|
||||
|
||||
# Step 2. Set up sequences
|
||||
#
|
||||
# We do this before porting the tables so that event if we fail half
|
||||
# We do this before porting the tables so that even if we fail half
|
||||
# way through the postgres DB always have sequences that are greater
|
||||
# than their respective tables. If we don't then creating the
|
||||
# `DataStore` object will fail due to the inconsistency.
|
||||
@@ -762,6 +773,10 @@ class Porter:
|
||||
await self._setup_state_group_id_seq()
|
||||
await self._setup_user_id_seq()
|
||||
await self._setup_events_stream_seqs()
|
||||
await self._setup_sequence(
|
||||
"un_partial_stated_event_stream_sequence",
|
||||
("un_partial_stated_event_stream",),
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"device_inbox_sequence", ("device_inbox", "device_federation_outbox")
|
||||
)
|
||||
@@ -772,6 +787,11 @@ class Porter:
|
||||
await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
|
||||
await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
|
||||
await self._setup_auth_chain_sequence()
|
||||
await self._setup_sequence(
|
||||
"application_services_txn_id_seq",
|
||||
("application_services_txns",),
|
||||
"txn_id",
|
||||
)
|
||||
|
||||
# Step 3. Get tables.
|
||||
self.progress.set_state("Fetching tables")
|
||||
@@ -803,7 +823,9 @@ class Porter:
|
||||
)
|
||||
# Map from table name to args passed to `handle_table`, i.e. a tuple
|
||||
# of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
|
||||
tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
|
||||
tables_to_port_info_map = {
|
||||
r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
|
||||
}
|
||||
|
||||
# Step 5. Do the copying.
|
||||
#
|
||||
@@ -901,7 +923,8 @@ class Porter:
|
||||
def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]:
|
||||
txn.execute(select)
|
||||
rows = txn.fetchall()
|
||||
headers: List[str] = [column[0] for column in txn.description]
|
||||
assert txn.description is not None
|
||||
headers = [column[0] for column in txn.description]
|
||||
|
||||
ts_ind = headers.index("ts")
|
||||
|
||||
@@ -1074,7 +1097,10 @@ class Porter:
|
||||
)
|
||||
|
||||
async def _setup_sequence(
|
||||
self, sequence_name: str, stream_id_tables: Iterable[str]
|
||||
self,
|
||||
sequence_name: str,
|
||||
stream_id_tables: Iterable[str],
|
||||
column_name: str = "stream_id",
|
||||
) -> None:
|
||||
"""Set a sequence to the correct value."""
|
||||
current_stream_ids = []
|
||||
@@ -1084,7 +1110,7 @@ class Porter:
|
||||
await self.sqlite_store.db_pool.simple_select_one_onecol(
|
||||
table=stream_id_table,
|
||||
keyvalues={},
|
||||
retcol="COALESCE(MAX(stream_id), 1)",
|
||||
retcol=f"COALESCE(MAX({column_name}), 1)",
|
||||
allow_none=True,
|
||||
),
|
||||
)
|
||||
@@ -1184,10 +1210,10 @@ class CursesProgress(Progress):
|
||||
self.total_processed = 0
|
||||
self.total_remaining = 0
|
||||
|
||||
super(CursesProgress, self).__init__()
|
||||
super().__init__()
|
||||
|
||||
def update(self, table: str, num_done: int) -> None:
|
||||
super(CursesProgress, self).update(table, num_done)
|
||||
super().update(table, num_done)
|
||||
|
||||
self.total_processed = 0
|
||||
self.total_remaining = 0
|
||||
@@ -1283,7 +1309,7 @@ class TerminalProgress(Progress):
|
||||
"""Just prints progress to the terminal"""
|
||||
|
||||
def update(self, table: str, num_done: int) -> None:
|
||||
super(TerminalProgress, self).update(table, num_done)
|
||||
super().update(table, num_done)
|
||||
|
||||
data = self.tables[table]
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ class MockHomeserver(HomeServer):
|
||||
DATASTORE_CLASS = DataStore # type: ignore [assignment]
|
||||
|
||||
def __init__(self, config: HomeServerConfig):
|
||||
super(MockHomeserver, self).__init__(
|
||||
super().__init__(
|
||||
hostname=config.server.server_name,
|
||||
config=config,
|
||||
reactor=reactor,
|
||||
|
||||
@@ -60,6 +60,7 @@ class Auth(Protocol):
|
||||
request: SynapseRequest,
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
"""Get a registered user's ID.
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ class InternalAuth(BaseAuth):
|
||||
request: SynapseRequest,
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
"""Get a registered user's ID.
|
||||
|
||||
@@ -79,7 +80,7 @@ class InternalAuth(BaseAuth):
|
||||
parent_span = active_span()
|
||||
with start_active_span("get_user_by_req"):
|
||||
requester = await self._wrapped_get_user_by_req(
|
||||
request, allow_guest, allow_expired
|
||||
request, allow_guest, allow_expired, allow_locked
|
||||
)
|
||||
|
||||
if parent_span:
|
||||
@@ -107,6 +108,7 @@ class InternalAuth(BaseAuth):
|
||||
request: SynapseRequest,
|
||||
allow_guest: bool,
|
||||
allow_expired: bool,
|
||||
allow_locked: bool,
|
||||
) -> Requester:
|
||||
"""Helper for get_user_by_req
|
||||
|
||||
@@ -126,6 +128,17 @@ class InternalAuth(BaseAuth):
|
||||
access_token, allow_expired=allow_expired
|
||||
)
|
||||
|
||||
# Deny the request if the user account is locked.
|
||||
if not allow_locked and await self.store.get_user_locked_status(
|
||||
requester.user.to_string()
|
||||
):
|
||||
raise AuthError(
|
||||
401,
|
||||
"User account has been locked",
|
||||
errcode=Codes.USER_LOCKED,
|
||||
additional_fields={"soft_logout": True},
|
||||
)
|
||||
|
||||
# Deny the request if the user account has expired.
|
||||
# This check is only done for regular users, not appservice ones.
|
||||
if not allow_expired:
|
||||
|
||||
@@ -20,6 +20,7 @@ from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret
|
||||
from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign
|
||||
from authlib.oauth2.rfc7662 import IntrospectionToken
|
||||
from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
|
||||
from prometheus_client import Histogram
|
||||
|
||||
from twisted.web.client import readBody
|
||||
from twisted.web.http_headers import Headers
|
||||
@@ -44,6 +45,13 @@ if TYPE_CHECKING:
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
introspection_response_timer = Histogram(
|
||||
"synapse_api_auth_delegated_introspection_response",
|
||||
"Time taken to get a response for an introspection request",
|
||||
["code"],
|
||||
)
|
||||
|
||||
|
||||
# Scope as defined by MSC2967
|
||||
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
|
||||
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
|
||||
@@ -99,6 +107,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
assert self._config.client_id, "No client_id provided"
|
||||
assert auth_method is not None, "Invalid client_auth_method provided"
|
||||
|
||||
self._clock = hs.get_clock()
|
||||
self._http_client = hs.get_proxied_http_client()
|
||||
self._hostname = hs.hostname
|
||||
self._admin_token = self._config.admin_token
|
||||
@@ -163,14 +172,26 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
# Do the actual request
|
||||
# We're not using the SimpleHttpClient util methods as we don't want to
|
||||
# check the HTTP status code, and we do the body encoding ourselves.
|
||||
response = await self._http_client.request(
|
||||
method="POST",
|
||||
uri=uri,
|
||||
data=body.encode("utf-8"),
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
resp_body = await make_deferred_yieldable(readBody(response))
|
||||
start_time = self._clock.time()
|
||||
try:
|
||||
response = await self._http_client.request(
|
||||
method="POST",
|
||||
uri=uri,
|
||||
data=body.encode("utf-8"),
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
resp_body = await make_deferred_yieldable(readBody(response))
|
||||
except Exception:
|
||||
end_time = self._clock.time()
|
||||
introspection_response_timer.labels("ERR").observe(end_time - start_time)
|
||||
raise
|
||||
|
||||
end_time = self._clock.time()
|
||||
introspection_response_timer.labels(response.code).observe(
|
||||
end_time - start_time
|
||||
)
|
||||
|
||||
if response.code < 200 or response.code >= 300:
|
||||
raise HttpResponseException(
|
||||
@@ -196,6 +217,7 @@ class MSC3861DelegatedAuth(BaseAuth):
|
||||
request: SynapseRequest,
|
||||
allow_guest: bool = False,
|
||||
allow_expired: bool = False,
|
||||
allow_locked: bool = False,
|
||||
) -> Requester:
|
||||
access_token = self.get_access_token_from_request(request)
|
||||
|
||||
|
||||
@@ -18,8 +18,7 @@
|
||||
"""Contains constants from the specification."""
|
||||
|
||||
import enum
|
||||
|
||||
from typing_extensions import Final
|
||||
from typing import Final
|
||||
|
||||
# the max size of a (canonical-json-encoded) event
|
||||
MAX_PDU_SIZE = 65536
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
"""Contains exceptions and error codes."""
|
||||
|
||||
import logging
|
||||
import math
|
||||
import typing
|
||||
from enum import Enum
|
||||
from http import HTTPStatus
|
||||
@@ -80,6 +81,8 @@ class Codes(str, Enum):
|
||||
WEAK_PASSWORD = "M_WEAK_PASSWORD"
|
||||
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
|
||||
USER_DEACTIVATED = "M_USER_DEACTIVATED"
|
||||
# USER_LOCKED = "M_USER_LOCKED"
|
||||
USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"
|
||||
|
||||
# Part of MSC3848
|
||||
# https://github.com/matrix-org/matrix-spec-proposals/pull/3848
|
||||
@@ -208,6 +211,11 @@ class SynapseError(CodeMessageException):
|
||||
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
|
||||
return cs_error(self.msg, self.errcode, **self._additional_fields)
|
||||
|
||||
@property
|
||||
def debug_context(self) -> Optional[str]:
|
||||
"""Override this to add debugging context that shouldn't be sent to clients."""
|
||||
return None
|
||||
|
||||
|
||||
class InvalidAPICallError(SynapseError):
|
||||
"""You called an existing API endpoint, but fed that endpoint
|
||||
@@ -217,6 +225,13 @@ class InvalidAPICallError(SynapseError):
|
||||
super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)
|
||||
|
||||
|
||||
class InvalidProxyCredentialsError(SynapseError):
|
||||
"""Error raised when the proxy credentials are invalid."""
|
||||
|
||||
def __init__(self, msg: str, errcode: str = Codes.UNKNOWN):
|
||||
super().__init__(401, msg, errcode)
|
||||
|
||||
|
||||
class ProxiedRequestError(SynapseError):
|
||||
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
|
||||
|
||||
@@ -494,19 +509,31 @@ class InvalidCaptchaError(SynapseError):
|
||||
class LimitExceededError(SynapseError):
|
||||
"""A client has sent too many requests and is being throttled."""
|
||||
|
||||
include_retry_after_header = False
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
limiter_name: str,
|
||||
code: int = 429,
|
||||
msg: str = "Too Many Requests",
|
||||
retry_after_ms: Optional[int] = None,
|
||||
errcode: str = Codes.LIMIT_EXCEEDED,
|
||||
):
|
||||
super().__init__(code, msg, errcode)
|
||||
headers = (
|
||||
{"Retry-After": str(math.ceil(retry_after_ms / 1000))}
|
||||
if self.include_retry_after_header and retry_after_ms is not None
|
||||
else None
|
||||
)
|
||||
super().__init__(code, "Too Many Requests", errcode, headers=headers)
|
||||
self.retry_after_ms = retry_after_ms
|
||||
self.limiter_name = limiter_name
|
||||
|
||||
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
|
||||
return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)
|
||||
|
||||
@property
|
||||
def debug_context(self) -> Optional[str]:
|
||||
return self.limiter_name
|
||||
|
||||
|
||||
class RoomKeysVersionError(SynapseError):
|
||||
"""A client has tried to upload to a non-current version of the room_keys store"""
|
||||
|
||||
@@ -40,7 +40,7 @@ class Ratelimiter:
|
||||
- the cost C of this request in tokens.
|
||||
Then, if there is room in the bucket for C tokens (T + C <= `burst_count`),
|
||||
the request is permitted and `cost` tokens are added to the bucket.
|
||||
Otherwise the request is denied, and the bucket continues to hold T tokens.
|
||||
Otherwise, the request is denied, and the bucket continues to hold T tokens.
|
||||
|
||||
This means that the limiter enforces an average request frequency of `rate_hz`,
|
||||
while accumulating a buffer of up to `burst_count` requests which can be consumed
|
||||
@@ -55,18 +55,23 @@ class Ratelimiter:
|
||||
request.
|
||||
|
||||
Args:
|
||||
store: The datastore providing get_ratelimit_for_user.
|
||||
clock: A homeserver clock, for retrieving the current time
|
||||
rate_hz: The long term number of actions that can be performed in a second.
|
||||
burst_count: How many actions that can be performed before being limited.
|
||||
cfg: The ratelimit configuration for this rate limiter including the
|
||||
allowed rate and burst count.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, store: DataStore, clock: Clock, rate_hz: float, burst_count: int
|
||||
self,
|
||||
store: DataStore,
|
||||
clock: Clock,
|
||||
cfg: RatelimitSettings,
|
||||
):
|
||||
self.clock = clock
|
||||
self.rate_hz = rate_hz
|
||||
self.burst_count = burst_count
|
||||
self.rate_hz = cfg.per_second
|
||||
self.burst_count = cfg.burst_count
|
||||
self.store = store
|
||||
self._limiter_name = cfg.key
|
||||
|
||||
# An ordered dictionary representing the token buckets tracked by this rate
|
||||
# limiter. Each entry maps a key of arbitrary type to a tuple representing:
|
||||
@@ -305,7 +310,8 @@ class Ratelimiter:
|
||||
|
||||
if not allowed:
|
||||
raise LimitExceededError(
|
||||
retry_after_ms=int(1000 * (time_allowed - time_now_s))
|
||||
limiter_name=self._limiter_name,
|
||||
retry_after_ms=int(1000 * (time_allowed - time_now_s)),
|
||||
)
|
||||
|
||||
|
||||
@@ -322,7 +328,9 @@ class RequestRatelimiter:
|
||||
|
||||
# The rate_hz and burst_count are overridden on a per-user basis
|
||||
self.request_ratelimiter = Ratelimiter(
|
||||
store=self.store, clock=self.clock, rate_hz=0, burst_count=0
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
cfg=RatelimitSettings(key=rc_message.key, per_second=0, burst_count=0),
|
||||
)
|
||||
self._rc_message = rc_message
|
||||
|
||||
@@ -332,8 +340,7 @@ class RequestRatelimiter:
|
||||
self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=rc_admin_redaction.per_second,
|
||||
burst_count=rc_admin_redaction.burst_count,
|
||||
cfg=rc_admin_redaction,
|
||||
)
|
||||
else:
|
||||
self.admin_redaction_ratelimiter = None
|
||||
|
||||
@@ -78,36 +78,29 @@ class RoomVersion:
|
||||
# MSC2209: Check 'notifications' key while verifying
|
||||
# m.room.power_levels auth rules.
|
||||
limit_notifications_power_levels: bool
|
||||
# MSC2175: No longer include the creator in m.room.create events.
|
||||
msc2175_implicit_room_creator: bool
|
||||
# MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
|
||||
# content property.
|
||||
msc2176_redaction_rules: bool
|
||||
# MSC3083: Support the 'restricted' join_rule.
|
||||
msc3083_join_rules: bool
|
||||
# MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
|
||||
# be enabled if MSC3083 is not.
|
||||
msc3375_redaction_rules: bool
|
||||
# MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
|
||||
# m.room.membership event with membership 'knock'.
|
||||
msc2403_knocking: bool
|
||||
# No longer include the creator in m.room.create events.
|
||||
implicit_room_creator: bool
|
||||
# Apply updated redaction rules algorithm from room version 11.
|
||||
updated_redaction_rules: bool
|
||||
# Support the 'restricted' join rule.
|
||||
restricted_join_rule: bool
|
||||
# Support for the proper redaction rules for the restricted join rule. This requires
|
||||
# restricted_join_rule to be enabled.
|
||||
restricted_join_rule_fix: bool
|
||||
# Support the 'knock' join rule.
|
||||
knock_join_rule: bool
|
||||
# MSC3389: Protect relation information from redaction.
|
||||
msc3389_relation_redactions: bool
|
||||
# MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
|
||||
# knocks and restricted join rules into the same join condition.
|
||||
msc3787_knock_restricted_join_rule: bool
|
||||
# MSC3667: Enforce integer power levels
|
||||
msc3667_int_only_power_levels: bool
|
||||
# MSC3821: Do not redact the third_party_invite content field for membership events.
|
||||
msc3821_redaction_rules: bool
|
||||
# Support the 'knock_restricted' join rule.
|
||||
knock_restricted_join_rule: bool
|
||||
# Enforce integer power levels
|
||||
enforce_int_power_levels: bool
|
||||
# MSC3931: Adds a push rule condition for "room version feature flags", making
|
||||
# some push rules room version dependent. Note that adding a flag to this list
|
||||
# is not enough to mark it "supported": the push rule evaluator also needs to
|
||||
# support the flag. Unknown flags are ignored by the evaluator, making conditions
|
||||
# fail if used.
|
||||
msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
|
||||
# MSC3989: Redact the origin field.
|
||||
msc3989_redaction_rules: bool
|
||||
|
||||
|
||||
class RoomVersions:
|
||||
@@ -120,17 +113,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=True,
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V2 = RoomVersion(
|
||||
"2",
|
||||
@@ -141,17 +132,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=True,
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V3 = RoomVersion(
|
||||
"3",
|
||||
@@ -162,17 +151,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=True,
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V4 = RoomVersion(
|
||||
"4",
|
||||
@@ -183,17 +170,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=True,
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V5 = RoomVersion(
|
||||
"5",
|
||||
@@ -204,17 +189,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=True,
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V6 = RoomVersion(
|
||||
"6",
|
||||
@@ -225,38 +208,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC2176 = RoomVersion(
|
||||
"org.matrix.msc2176",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=True,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=False,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V7 = RoomVersion(
|
||||
"7",
|
||||
@@ -267,17 +227,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=False,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V8 = RoomVersion(
|
||||
"8",
|
||||
@@ -288,17 +246,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=False,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=False,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V9 = RoomVersion(
|
||||
"9",
|
||||
@@ -309,59 +265,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=False,
|
||||
enforce_int_power_levels=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC3787 = RoomVersion(
|
||||
"org.matrix.msc3787",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=False,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC3821 = RoomVersion(
|
||||
"org.matrix.msc3821.opt1",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=False,
|
||||
msc3667_int_only_power_levels=False,
|
||||
msc3821_redaction_rules=True,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
V10 = RoomVersion(
|
||||
"10",
|
||||
@@ -372,17 +284,15 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=True,
|
||||
enforce_int_power_levels=True,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC1767v10 = RoomVersion(
|
||||
# MSC1767 (Extensible Events) based on room version "10"
|
||||
@@ -394,60 +304,34 @@ class RoomVersions:
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=False,
|
||||
updated_redaction_rules=False,
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=True,
|
||||
enforce_int_power_levels=True,
|
||||
msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
|
||||
msc3989_redaction_rules=False,
|
||||
)
|
||||
MSC3989 = RoomVersion(
|
||||
"org.matrix.msc3989",
|
||||
RoomDisposition.UNSTABLE,
|
||||
V11 = RoomVersion(
|
||||
"11",
|
||||
RoomDisposition.STABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
implicit_room_creator=True, # Used by MSC3820
|
||||
updated_redaction_rules=True, # Used by MSC3820
|
||||
restricted_join_rule=True,
|
||||
restricted_join_rule_fix=True,
|
||||
knock_join_rule=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=False,
|
||||
knock_restricted_join_rule=True,
|
||||
enforce_int_power_levels=True,
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=True,
|
||||
)
|
||||
MSC3820opt2 = RoomVersion(
|
||||
# Based upon v10
|
||||
"org.matrix.msc3820.opt2",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.ROOM_V4_PLUS,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2175_implicit_room_creator=True, # Used by MSC3820
|
||||
msc2176_redaction_rules=True, # Used by MSC3820
|
||||
msc3083_join_rules=True,
|
||||
msc3375_redaction_rules=True,
|
||||
msc2403_knocking=True,
|
||||
msc3389_relation_redactions=False,
|
||||
msc3787_knock_restricted_join_rule=True,
|
||||
msc3667_int_only_power_levels=True,
|
||||
msc3821_redaction_rules=True, # Used by MSC3820
|
||||
msc3931_push_features=(),
|
||||
msc3989_redaction_rules=True, # Used by MSC3820
|
||||
)
|
||||
|
||||
|
||||
@@ -460,14 +344,11 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
|
||||
RoomVersions.V4,
|
||||
RoomVersions.V5,
|
||||
RoomVersions.V6,
|
||||
RoomVersions.MSC2176,
|
||||
RoomVersions.V7,
|
||||
RoomVersions.V8,
|
||||
RoomVersions.V9,
|
||||
RoomVersions.MSC3787,
|
||||
RoomVersions.V10,
|
||||
RoomVersions.MSC3989,
|
||||
RoomVersions.MSC3820opt2,
|
||||
RoomVersions.V11,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -496,12 +377,12 @@ MSC3244_CAPABILITIES = {
|
||||
RoomVersionCapability(
|
||||
"knock",
|
||||
RoomVersions.V7,
|
||||
lambda room_version: room_version.msc2403_knocking,
|
||||
lambda room_version: room_version.knock_join_rule,
|
||||
),
|
||||
RoomVersionCapability(
|
||||
"restricted",
|
||||
RoomVersions.V9,
|
||||
lambda room_version: room_version.msc3083_join_rules,
|
||||
lambda room_version: room_version.restricted_join_rule,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -386,6 +386,7 @@ def listen_unix(
|
||||
|
||||
|
||||
def listen_http(
|
||||
hs: "HomeServer",
|
||||
listener_config: ListenerConfig,
|
||||
root_resource: Resource,
|
||||
version_string: str,
|
||||
@@ -406,6 +407,7 @@ def listen_http(
|
||||
version_string,
|
||||
max_request_body_size=max_request_body_size,
|
||||
reactor=reactor,
|
||||
hs=hs,
|
||||
)
|
||||
|
||||
if isinstance(listener_config, TCPListenerConfig):
|
||||
|
||||
@@ -91,6 +91,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore
|
||||
from synapse.storage.databases.main.stats import StatsStore
|
||||
from synapse.storage.databases.main.stream import StreamWorkerStore
|
||||
from synapse.storage.databases.main.tags import TagsWorkerStore
|
||||
from synapse.storage.databases.main.task_scheduler import TaskSchedulerWorkerStore
|
||||
from synapse.storage.databases.main.transactions import TransactionWorkerStore
|
||||
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
|
||||
from synapse.storage.databases.main.user_directory import UserDirectoryStore
|
||||
@@ -144,6 +145,7 @@ class GenericWorkerStore(
|
||||
TransactionWorkerStore,
|
||||
LockStore,
|
||||
SessionStore,
|
||||
TaskSchedulerWorkerStore,
|
||||
):
|
||||
# Properties that multiple storage classes define. Tell mypy what the
|
||||
# expected type is.
|
||||
@@ -221,6 +223,7 @@ class GenericWorkerServer(HomeServer):
|
||||
root_resource = create_resource_tree(resources, OptionsResource())
|
||||
|
||||
_base.listen_http(
|
||||
self,
|
||||
listener_config,
|
||||
root_resource,
|
||||
self.version_string,
|
||||
|
||||
@@ -139,6 +139,7 @@ class SynapseHomeServer(HomeServer):
|
||||
root_resource = OptionsResource()
|
||||
|
||||
ports = listen_http(
|
||||
self,
|
||||
listener_config,
|
||||
create_resource_tree(resources, root_resource),
|
||||
self.version_string,
|
||||
|
||||
@@ -16,9 +16,6 @@ import logging
|
||||
import urllib.parse
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
@@ -27,10 +24,11 @@ from typing import (
|
||||
Sequence,
|
||||
Tuple,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
|
||||
from prometheus_client import Counter
|
||||
from typing_extensions import Concatenate, ParamSpec, TypeGuard
|
||||
from typing_extensions import ParamSpec, TypeGuard
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
|
||||
from synapse.api.errors import CodeMessageException, HttpResponseException
|
||||
@@ -80,9 +78,7 @@ sent_todevice_counter = Counter(
|
||||
|
||||
HOUR_IN_MS = 60 * 60 * 1000
|
||||
|
||||
|
||||
APP_SERVICE_PREFIX = "/_matrix/app/v1"
|
||||
APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
@@ -123,52 +119,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
self.clock = hs.get_clock()
|
||||
self.config = hs.config.appservice
|
||||
|
||||
self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
|
||||
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
|
||||
)
|
||||
|
||||
async def _send_with_fallbacks(
|
||||
self,
|
||||
service: "ApplicationService",
|
||||
prefixes: List[str],
|
||||
path: str,
|
||||
func: Callable[Concatenate[str, P], Awaitable[R]],
|
||||
*args: P.args,
|
||||
**kwargs: P.kwargs,
|
||||
) -> R:
|
||||
"""
|
||||
Attempt to call an application service with multiple paths, falling back
|
||||
until one succeeds.
|
||||
|
||||
Args:
|
||||
service: The appliacation service, this provides the base URL.
|
||||
prefixes: A last of paths to try in order for the requests.
|
||||
path: A suffix to append to each prefix.
|
||||
func: The function to call, the first argument will be the full
|
||||
endpoint to fetch. Other arguments are provided by args/kwargs.
|
||||
|
||||
Returns:
|
||||
The return value of func.
|
||||
"""
|
||||
for i, prefix in enumerate(prefixes, start=1):
|
||||
uri = f"{service.url}{prefix}{path}"
|
||||
try:
|
||||
return await func(uri, *args, **kwargs)
|
||||
except HttpResponseException as e:
|
||||
# If an error is received that is due to an unrecognised path,
|
||||
# fallback to next path (if one exists). Otherwise, consider it
|
||||
# a legitimate error and raise.
|
||||
if i < len(prefixes) and is_unknown_endpoint(e):
|
||||
continue
|
||||
raise
|
||||
except Exception:
|
||||
# Unexpected exceptions get sent to the caller.
|
||||
raise
|
||||
|
||||
# The function should always exit via the return or raise above this.
|
||||
raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")
|
||||
|
||||
async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
|
||||
if service.url is None:
|
||||
return False
|
||||
@@ -177,12 +133,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
assert service.hs_token is not None
|
||||
|
||||
try:
|
||||
response = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, ""],
|
||||
f"/users/{urllib.parse.quote(user_id)}",
|
||||
self.get_json,
|
||||
{"access_token": service.hs_token},
|
||||
args = None
|
||||
if self.config.use_appservice_legacy_authorization:
|
||||
args = {"access_token": service.hs_token}
|
||||
response = await self.get_json(
|
||||
f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
|
||||
args,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
if response is not None: # just an empty json object
|
||||
@@ -203,12 +159,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
assert service.hs_token is not None
|
||||
|
||||
try:
|
||||
response = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, ""],
|
||||
f"/rooms/{urllib.parse.quote(alias)}",
|
||||
self.get_json,
|
||||
{"access_token": service.hs_token},
|
||||
args = None
|
||||
if self.config.use_appservice_legacy_authorization:
|
||||
args = {"access_token": service.hs_token}
|
||||
response = await self.get_json(
|
||||
f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
|
||||
args,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
if response is not None: # just an empty json object
|
||||
@@ -241,15 +197,14 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
assert service.hs_token is not None
|
||||
|
||||
try:
|
||||
args: Mapping[Any, Any] = {
|
||||
**fields,
|
||||
b"access_token": service.hs_token,
|
||||
}
|
||||
response = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
|
||||
f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
|
||||
self.get_json,
|
||||
args: Mapping[bytes, Union[List[bytes], str]] = fields
|
||||
if self.config.use_appservice_legacy_authorization:
|
||||
args = {
|
||||
**fields,
|
||||
b"access_token": service.hs_token,
|
||||
}
|
||||
response = await self.get_json(
|
||||
f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
|
||||
args=args,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
@@ -285,12 +240,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
# This is required by the configuration.
|
||||
assert service.hs_token is not None
|
||||
try:
|
||||
info = await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
|
||||
f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
|
||||
self.get_json,
|
||||
{"access_token": service.hs_token},
|
||||
args = None
|
||||
if self.config.use_appservice_legacy_authorization:
|
||||
args = {"access_token": service.hs_token}
|
||||
info = await self.get_json(
|
||||
f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
|
||||
args,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
|
||||
@@ -401,13 +356,14 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
}
|
||||
|
||||
try:
|
||||
await self._send_with_fallbacks(
|
||||
service,
|
||||
[APP_SERVICE_PREFIX, ""],
|
||||
f"/transactions/{urllib.parse.quote(str(txn_id))}",
|
||||
self.put_json,
|
||||
args = None
|
||||
if self.config.use_appservice_legacy_authorization:
|
||||
args = {"access_token": service.hs_token}
|
||||
|
||||
await self.put_json(
|
||||
f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
|
||||
json_body=body,
|
||||
args={"access_token": service.hs_token},
|
||||
args=args,
|
||||
headers={"Authorization": [f"Bearer {service.hs_token}"]},
|
||||
)
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
|
||||
@@ -186,9 +186,9 @@ class Config:
|
||||
TypeError, if given something other than an integer or a string
|
||||
ValueError: if given a string not of the form described above.
|
||||
"""
|
||||
if type(value) is int:
|
||||
if type(value) is int: # noqa: E721
|
||||
return value
|
||||
elif type(value) is str:
|
||||
elif isinstance(value, str):
|
||||
sizes = {"K": 1024, "M": 1024 * 1024}
|
||||
size = 1
|
||||
suffix = value[-1]
|
||||
@@ -218,9 +218,9 @@ class Config:
|
||||
TypeError, if given something other than an integer or a string
|
||||
ValueError: if given a string not of the form described above.
|
||||
"""
|
||||
if type(value) is int:
|
||||
if type(value) is int: # noqa: E721
|
||||
return value
|
||||
elif type(value) is str:
|
||||
elif isinstance(value, str):
|
||||
second = 1000
|
||||
minute = 60 * second
|
||||
hour = 60 * minute
|
||||
|
||||
@@ -34,7 +34,7 @@ class AppServiceConfig(Config):
|
||||
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
|
||||
self.app_service_config_files = config.get("app_service_config_files", [])
|
||||
if not isinstance(self.app_service_config_files, list) or not all(
|
||||
type(x) is str for x in self.app_service_config_files
|
||||
isinstance(x, str) for x in self.app_service_config_files
|
||||
):
|
||||
raise ConfigError(
|
||||
"Expected '%s' to be a list of AS config files:"
|
||||
@@ -43,6 +43,14 @@ class AppServiceConfig(Config):
|
||||
)
|
||||
|
||||
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
|
||||
self.use_appservice_legacy_authorization = config.get(
|
||||
"use_appservice_legacy_authorization", False
|
||||
)
|
||||
if self.use_appservice_legacy_authorization:
|
||||
logger.warning(
|
||||
"The use of appservice legacy authorization via query params is deprecated"
|
||||
" and should be considered insecure."
|
||||
)
|
||||
|
||||
|
||||
def load_appservices(
|
||||
|
||||
@@ -31,7 +31,7 @@ class AuthConfig(Config):
|
||||
|
||||
# The default value of password_config.enabled is True, unless msc3861 is enabled.
|
||||
msc3861_enabled = (
|
||||
config.get("experimental_features", {})
|
||||
(config.get("experimental_features") or {})
|
||||
.get("msc3861", {})
|
||||
.get("enabled", False)
|
||||
)
|
||||
|
||||
@@ -18,7 +18,7 @@ from typing import Any, List
|
||||
from synapse.config.sso import SsoAttributeRequirement
|
||||
from synapse.types import JsonDict
|
||||
|
||||
from ._base import Config
|
||||
from ._base import Config, ConfigError
|
||||
from ._util import validate_config
|
||||
|
||||
|
||||
@@ -41,15 +41,30 @@ class CasConfig(Config):
|
||||
public_baseurl = self.root.server.public_baseurl
|
||||
self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"
|
||||
|
||||
self.cas_protocol_version = cas_config.get("protocol_version")
|
||||
if (
|
||||
self.cas_protocol_version is not None
|
||||
and self.cas_protocol_version not in [1, 2, 3]
|
||||
):
|
||||
raise ConfigError(
|
||||
"Unsupported CAS protocol version %s (only versions 1, 2, 3 are supported)"
|
||||
% (self.cas_protocol_version,),
|
||||
("cas_config", "protocol_version"),
|
||||
)
|
||||
self.cas_displayname_attribute = cas_config.get("displayname_attribute")
|
||||
required_attributes = cas_config.get("required_attributes") or {}
|
||||
self.cas_required_attributes = _parsed_required_attributes_def(
|
||||
required_attributes
|
||||
)
|
||||
|
||||
self.idp_name = cas_config.get("idp_name", "CAS")
|
||||
self.idp_icon = cas_config.get("idp_icon")
|
||||
self.idp_brand = cas_config.get("idp_brand")
|
||||
|
||||
else:
|
||||
self.cas_server_url = None
|
||||
self.cas_service_url = None
|
||||
self.cas_protocol_version = None
|
||||
self.cas_displayname_attribute = None
|
||||
self.cas_required_attributes = []
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Any, Optional
|
||||
import attr
|
||||
import attr.validators
|
||||
|
||||
from synapse.api.errors import LimitExceededError
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
|
||||
from synapse.config import ConfigError
|
||||
from synapse.config._base import Config, RootConfig
|
||||
@@ -173,6 +174,13 @@ class MSC3861:
|
||||
("enable_registration",),
|
||||
)
|
||||
|
||||
# We only need to test the user consent version, as if it must be set if the user_consent section was present in the config
|
||||
if root.consent.user_consent_version is not None:
|
||||
raise ConfigError(
|
||||
"User consent cannot be enabled when OAuth delegation is enabled",
|
||||
("user_consent",),
|
||||
)
|
||||
|
||||
if (
|
||||
root.oidc.oidc_enabled
|
||||
or root.saml2.saml2_enabled
|
||||
@@ -216,10 +224,10 @@ class MSC3861:
|
||||
("session_lifetime",),
|
||||
)
|
||||
|
||||
if not root.experimental.msc3970_enabled:
|
||||
if root.registration.enable_3pid_changes:
|
||||
raise ConfigError(
|
||||
"experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled",
|
||||
("experimental_features", "msc3970_enabled"),
|
||||
"enable_3pid_changes cannot be enabled when OAuth delegation is enabled",
|
||||
("enable_3pid_changes",),
|
||||
)
|
||||
|
||||
|
||||
@@ -247,6 +255,27 @@ class ExperimentalConfig(Config):
|
||||
# MSC3026 (busy presence state)
|
||||
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
|
||||
|
||||
# MSC2697 (device dehydration)
|
||||
# Enabled by default since this option was added after adding the feature.
|
||||
# It is not recommended that both MSC2697 and MSC3814 both be enabled at
|
||||
# once.
|
||||
self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True)
|
||||
|
||||
# MSC3814 (dehydrated devices with SSSS)
|
||||
# This is an alternative method to achieve the same goals as MSC2697.
|
||||
# It is not recommended that both MSC2697 and MSC3814 both be enabled at
|
||||
# once.
|
||||
self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)
|
||||
|
||||
if self.msc2697_enabled and self.msc3814_enabled:
|
||||
raise ConfigError(
|
||||
"MSC2697 and MSC3814 should not both be enabled.",
|
||||
(
|
||||
"experimental_features",
|
||||
"msc3814_enabled",
|
||||
),
|
||||
)
|
||||
|
||||
# MSC3244 (room version capabilities)
|
||||
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)
|
||||
|
||||
@@ -355,11 +384,6 @@ class ExperimentalConfig(Config):
|
||||
# MSC3391: Removing account data.
|
||||
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
|
||||
|
||||
# MSC3959: Do not generate notifications for edits.
|
||||
self.msc3958_supress_edit_notifs = experimental.get(
|
||||
"msc3958_supress_edit_notifs", False
|
||||
)
|
||||
|
||||
# MSC3967: Do not require UIA when first uploading cross signing keys
|
||||
self.msc3967_enabled = experimental.get("msc3967_enabled", False)
|
||||
|
||||
@@ -376,16 +400,18 @@ class ExperimentalConfig(Config):
|
||||
"Invalid MSC3861 configuration", ("experimental", "msc3861")
|
||||
) from exc
|
||||
|
||||
# MSC3970: Scope transaction IDs to devices
|
||||
self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled)
|
||||
|
||||
# Check that none of the other config options conflict with MSC3861 when enabled
|
||||
self.msc3861.check_config_conflicts(self.root)
|
||||
|
||||
# MSC4009: E.164 Matrix IDs
|
||||
self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)
|
||||
|
||||
# MSC4010: Do not allow setting m.push_rules account data.
|
||||
self.msc4010_push_rules_account_data = experimental.get(
|
||||
"msc4010_push_rules_account_data", False
|
||||
)
|
||||
|
||||
# MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling
|
||||
#
|
||||
# This is a bit hacky, but the most reasonable way to *alway* include the
|
||||
# headers.
|
||||
LimitExceededError.include_retry_after_header = experimental.get(
|
||||
"msc4041_enabled", False
|
||||
)
|
||||
|
||||
@@ -65,5 +65,23 @@ class FederationConfig(Config):
|
||||
self.max_long_retries = federation_config.get("max_long_retries", 10)
|
||||
self.max_short_retries = federation_config.get("max_short_retries", 3)
|
||||
|
||||
# Allow for the configuration of the backoff algorithm used
|
||||
# when trying to reach an unavailable destination.
|
||||
# Unlike previous configuration those values applies across
|
||||
# multiple requests and the state of the backoff is stored on DB.
|
||||
self.destination_min_retry_interval_ms = Config.parse_duration(
|
||||
federation_config.get("destination_min_retry_interval", "10m")
|
||||
)
|
||||
self.destination_retry_multiplier = federation_config.get(
|
||||
"destination_retry_multiplier", 2
|
||||
)
|
||||
self.destination_max_retry_interval_ms = min(
|
||||
Config.parse_duration(
|
||||
federation_config.get("destination_max_retry_interval", "7d")
|
||||
),
|
||||
# Set a hard-limit to not overflow the database column.
|
||||
2**62,
|
||||
)
|
||||
|
||||
|
||||
_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}
|
||||
|
||||
@@ -280,6 +280,20 @@ def _parse_oidc_config_dict(
|
||||
for x in oidc_config.get("attribute_requirements", [])
|
||||
]
|
||||
|
||||
# Read from either `client_secret_path` or `client_secret`. If both exist, error.
|
||||
client_secret = oidc_config.get("client_secret")
|
||||
client_secret_path = oidc_config.get("client_secret_path")
|
||||
if client_secret_path is not None:
|
||||
if client_secret is None:
|
||||
client_secret = read_file(
|
||||
client_secret_path, config_path + ("client_secret_path",)
|
||||
).rstrip("\n")
|
||||
else:
|
||||
raise ConfigError(
|
||||
"Cannot specify both client_secret and client_secret_path",
|
||||
config_path + ("client_secret",),
|
||||
)
|
||||
|
||||
return OidcProviderConfig(
|
||||
idp_id=idp_id,
|
||||
idp_name=oidc_config.get("idp_name", "OIDC"),
|
||||
@@ -288,7 +302,7 @@ def _parse_oidc_config_dict(
|
||||
discover=oidc_config.get("discover", True),
|
||||
issuer=oidc_config["issuer"],
|
||||
client_id=oidc_config["client_id"],
|
||||
client_secret=oidc_config.get("client_secret"),
|
||||
client_secret=client_secret,
|
||||
client_secret_jwt_key=client_secret_jwt_key,
|
||||
client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
|
||||
pkce_method=oidc_config.get("pkce_method", "auto"),
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user