Compare commits
275 commits (anoa/modul...v1.84.0)
| SHA1 |
|---|
| 5cae9158e6 |
| ea6fcda98d |
| 11ff4884e7 |
| b6a7d49b6f |
| 0ccfb9318c |
| 3ec9f3b0cc |
| c97198ee14 |
| 55b08534a4 |
| ba572647b2 |
| f2905d827f |
| eb3c1823d8 |
| ba6b21c81e |
| 8583346335 |
| b3ada9bfb4 |
| aa5c0592e7 |
| 3690d5bd89 |
| 7b6c9f4c04 |
| 2e8a2bda52 |
| 3fd8eb81de |
| 1b4782a37d |
| 34ab801379 |
| bcd2495469 |
| def480442d |
| 808105bd31 |
| c96a1d2a27 |
| 08297f2f18 |
| 7c76514f1e |
| d19d1edbcf |
| 5a7742a833 |
| 2611433b70 |
| 5bf9ec9e3e |
| e4f545c452 |
| 722ccc30b5 |
| 7e6ad62c49 |
| 86d541f37c |
| d3bd03559b |
| ab4535b608 |
| 266d287165 |
| 64a11fb61f |
| 4b4e0dc3ce |
| 2bfe3f0b81 |
| 6b7da31221 |
| 58a07f0c3f |
| 058c6269f3 |
| 0a18aa236d |
| db093df5eb |
| 245d34bdcc |
| 9a87895b59 |
| 28bceef84e |
| 36df9c5e36 |
| a0f53afd62 |
| ad141efb47 |
| 7c95b65873 |
| e46d5f3586 |
| 83e7fa5eee |
| 2e59e97ebd |
| ded8f3d349 |
| cc872eaf16 |
| 5f8822854d |
| 1d6140ec8a |
| 7be05df0b1 |
| 8aee823393 |
| 28ac1a1a91 |
| fc3a878220 |
| 3b837d856c |
| 9890f23469 |
| a7b3e9ce65 |
| 04e79e6a18 |
| 0e8aa2a1b2 |
| 4de271a7fc |
| 1c0e98717b |
| ca6bda2f57 |
| 60d59af300 |
| 7b41966be9 |
| 6aca4e7cb8 |
| 07b1c70d6b |
| 3b853b18b5 |
| 0da7cceae9 |
| fcc943d552 |
| 229fe1d197 |
| 1046184f35 |
| 89f6fb0d5a |
| eb6f8dc215 |
| 57aeeb308b |
| 6efa674004 |
| a346b43837 |
| 6b2f2bd276 |
| 486c059479 |
| 3e95c19911 |
| 301b4156d5 |
| 247e6a8a78 |
| e2e9b545ff |
| 9900f7c231 |
| 710502c6d8 |
| 8e9739449d |
| b39b02c26e |
| c34791ef5b |
| c55293c230 |
| 8b3a502996 |
| ea5c3ede4f |
| 19141b9432 |
| 625ebbf92d |
| 62e27ceb89 |
| 8b4fb64f1e |
| c306fdeb38 |
| 6e32ecf62a |
| 197fbb123b |
| 5e024a0645 |
| ae69d69525 |
| cb8e274c07 |
| 2f144dcdee |
| ce00710303 |
| aec639e3e3 |
| 929797d939 |
| e12d788bb7 |
| d935b806a5 |
| 838de27666 |
| 745704ca69 |
| 3a82433ccf |
| efab118251 |
| 49482222ca |
| fce59ca5a1 |
| 0475cae3ac |
| c9326140dc |
| 8a47d6e3a6 |
| 24b61f32ff |
| e4a25d022c |
| b5192355f6 |
| dabbb94faf |
| de4390cd40 |
| 4af0aec54d |
| d751f65e71 |
| edae20f926 |
| 38272be037 |
| 2503126d52 |
| c9723a1c1f |
| be36600327 |
| 253e86a72e |
| 66ad1b8984 |
| c1b7da69cc |
| c94307adfe |
| 0bc1061119 |
| ce40330de1 |
| 8e0a3428d7 |
| 70781d3691 |
| 3dd72b924e |
| d07d255830 |
| 3ad221ea40 |
| d5cc911167 |
| 61251275fe |
| b5355dfde8 |
| d62076003d |
| e708a33cd9 |
| 485b9fdefb |
| 72b43bec8b |
| edf046ece7 |
| ec6430bad8 |
| 83649b891d |
| 6eb3edec47 |
| 6b23d74ad1 |
| 6d103373e2 |
| 735e4d1f9d |
| 79d2e2e79c |
| 89a71e7390 |
| c0772b4461 |
| 8aa121c2be |
| cf2f2934ad |
| 56efa9b167 |
| 9b2ab506c5 |
| 84b06fc893 |
| 675ff0d5d0 |
| 157092d97a |
| 6204c3663e |
| 72d2ceaa9a |
| 2a234b788e |
| 6f68e32bfb |
| 91c3f32673 |
| ae4acda1bb |
| d9f694932c |
| a3bad89d57 |
| 9228ae633f |
| 9d641d88b7 |
| f0d8f66eaa |
| 5350b5d04d |
| 78cdb72cd6 |
| d0541e36c0 |
| 753d1d9cde |
| 5282ba1e2b |
| 57481ca694 |
| 8a47bf13ef |
| 2e936afd5f |
| 4c8ada3904 |
| 9f7d6c6bc1 |
| bd4d958aaf |
| 96f163d932 |
| 4fc85e5a92 |
| 7a892ce793 |
| 7d3ea4886c |
| 316044d6fa |
| fae4a2c066 |
| 1ad142782a |
| 43411a0fd8 |
| ce00e57a2a |
| d5324ee111 |
| 5f7c908280 |
| 5b70f240cf |
| 68a6717312 |
| e6af49fbea |
| 98fd558382 |
| 3b0083c92a |
| cabe4a3005 |
| 7f02fafa28 |
| 7655bc0542 |
| 4b8c9c340c |
| 1bc9985eb7 |
| 72f3f23c4d |
| b32014578a |
| 8f2a3cbb70 |
| a9216edbaa |
| 882911a863 |
| 9b1f99ba6b |
| 1bc4feb6c9 |
| 527512b811 |
| 9f5d7d5ba2 |
| 1e1c220084 |
| 72832a6158 |
| 96bcc5d902 |
| ec9224bf9a |
| b6aef59334 |
| f11fe931f5 |
| 827f198177 |
| a5fb382a29 |
| 5ab7146e19 |
| 63e25010d6 |
| 25006acc17 |
| f75a041f59 |
| eee26138fe |
| 099b69fb1c |
| 1870b44d23 |
| 2cfa6a3001 |
| 14d8d41658 |
| 3d70cc393f |
| 66fc166b96 |
| afb216c202 |
| b0a0fb5c97 |
| 1f5473465d |
| 4953cd71df |
| f54f877f27 |
| 3bf973edc7 |
| 121fce7500 |
| 63d87c08c8 |
| d0fe417f5c |
| de92fb6a28 |
| 003a25ae5c |
| 8b1af08c6e |
| e7b559d2ca |
| a1c9869394 |
| 5e21e15f96 |
| edcf938173 |
| c071cd5a0e |
| d4eba4409f |
| 408f60540f |
| 023f215c68 |
| f167b35de9 |
| 6326d744c9 |
| ff155f7891 |
| 4bb26c95a9 |
| e157c63f68 |
| ce54477f6f |
| caf43c3d7c |
| 3d060eae6c |
| e7c3832ba6 |
| be4ea209e8 |
| 88efc75bab |
| 9418344db4 |
@@ -31,34 +31,6 @@ sed -i \
    -e '/systemd/d' \
    pyproject.toml

# Use poetry to do the installation. This ensures that the versions are all mutually
# compatible (as far as the package metadata declares, anyway); pip's package resolver
# is more lax.
#
# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.

pip install toml wheel

REMOVE_DEV_DEPENDENCIES="
import toml
with open('pyproject.toml', 'r') as f:
    data = toml.loads(f.read())

del data['tool']['poetry']['dev-dependencies']

with open('pyproject.toml', 'w') as f:
    toml.dump(data, f)
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"

pip install poetry==1.3.2
poetry lock

echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
@@ -9,16 +9,6 @@ set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'

block Set Go Version
    # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
    # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path

    # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
    echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
    # Add the Go path to the PATH: We need this so we can call gotestfmt
    echo "~/go/bin" >> $GITHUB_PATH
endblock

block Install Complement Dependencies
    sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
    go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
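The `block`/`endblock` aliases in the hunk above are thin wrappers around GitHub Actions' `::group::`/`::endgroup::` workflow commands, which fold the enclosed log lines into a named, collapsible section in the job log. A minimal sketch of the same technique used directly in a workflow step (the step name and the command inside the group are illustrative, not from this diff):

```yaml
# Hypothetical workflow step demonstrating ::group:: log folding.
steps:
  - name: Grouped logs example
    run: |
      echo "::group::Install dependencies"   # open a collapsible section in the job log
      sudo apt-get -qq update                # output here is folded under the group title
      echo "::endgroup::"                    # close the section
```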
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (2 changes, vendored)

@@ -129,7 +129,7 @@ body:
    attributes:
      label: Relevant log output
      description: |
-       Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
+       Please copy and paste any relevant log output as text (not images), ideally at INFO or DEBUG log level.
        This will be automatically formatted into code, so there is no need for backticks (`\``).

        Please be careful to remove any personal or private data.
.github/workflows/docker.yml (12 changes, vendored)

@@ -10,6 +10,7 @@ on:
permissions:
  contents: read
+ packages: write

jobs:
  build:
@@ -34,11 +35,20 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

+     - name: Log in to GHCR
+       uses: docker/login-action@v2
+       with:
+         registry: ghcr.io
+         username: ${{ github.repository_owner }}
+         password: ${{ secrets.GITHUB_TOKEN }}

      - name: Calculate docker image tag
        id: set-tag
        uses: docker/metadata-action@master
        with:
-         images: matrixdotorg/synapse
+         images: |
+           docker.io/matrixdotorg/synapse
+           ghcr.io/matrix-org/synapse
          flavor: |
            latest=false
          tags: |
.github/workflows/docs-pr-netlify.yaml (4 changes, vendored)

@@ -14,7 +14,7 @@ jobs:
      # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
      # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
      - name: 📥 Download artifact
-       uses: dawidd6/action-download-artifact@5e780fc7bbd0cac69fc73271ed86edf5dcb72d67 # v2.26.0
+       uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
        with:
          workflow: docs-pr.yaml
          run_id: ${{ github.event.workflow_run.id }}
@@ -22,7 +22,7 @@ jobs:
          path: book

      - name: 📤 Deploy to Netlify
-       uses: matrix-org/netlify-pr-preview@v1
+       uses: matrix-org/netlify-pr-preview@v2
        with:
          path: book
          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
.github/workflows/docs.yaml (79 changes, vendored)

@@ -13,25 +13,10 @@ on:
  workflow_dispatch:

jobs:
- pages:
-   name: GitHub Pages
+ pre:
+   name: Calculate variables for GitHub Pages deployment
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

-     - name: Setup mdbook
-       uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
-       with:
-         mdbook-version: '0.4.17'

-     - name: Build the documentation
-       # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
-       # However, we're using docs/README.md for other purposes and need to pick a new page
-       # as the default. Let's opt for the welcome page instead.
-       run: |
-         mdbook build
-         cp book/welcome_and_overview.html book/index.html

      # Figure out the target directory.
      #
      # The target directory depends on the name of the branch
@@ -55,11 +40,65 @@ jobs:
          # finally, set the 'branch-version' var.
          echo "branch-version=$branch" >> "$GITHUB_OUTPUT"

+   outputs:
+     branch-version: ${{ steps.vars.outputs.branch-version }}

+ ################################################################################
+ pages-docs:
+   name: GitHub Pages
+   runs-on: ubuntu-latest
+   needs:
+     - pre
+   steps:
+     - uses: actions/checkout@v3
+
+     - name: Setup mdbook
+       uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+       with:
+         mdbook-version: '0.4.17'
+
+     - name: Build the documentation
+       # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+       # However, we're using docs/README.md for other purposes and need to pick a new page
+       # as the default. Let's opt for the welcome page instead.
+       run: |
+         mdbook build
+         cp book/welcome_and_overview.html book/index.html

      # Deploy to the target directory.
      - name: Deploy to gh pages
-       uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
+       uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./book
-         destination_dir: ./${{ steps.vars.outputs.branch-version }}
+         destination_dir: ./${{ needs.pre.outputs.branch-version }}

+ ################################################################################
+ pages-devdocs:
+   name: GitHub Pages (developer docs)
+   runs-on: ubuntu-latest
+   needs:
+     - pre
+   steps:
+     - uses: actions/checkout@v3
+
+     - name: "Set up Sphinx"
+       uses: matrix-org/setup-python-poetry@v1
+       with:
+         python-version: "3.x"
+         poetry-version: "1.3.2"
+         groups: "dev-docs"
+         extras: ""
+
+     - name: Build the documentation
+       run: |
+         cd dev-docs
+         poetry run make html
+
+     # Deploy to the target directory.
+     - name: Deploy to gh pages
+       uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
+       with:
+         github_token: ${{ secrets.GITHUB_TOKEN }}
+         publish_dir: ./dev-docs/_build/html
+         destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
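The refactor above splits the old single `pages` job into a `pre` job that computes `branch-version` and downstream jobs (`pages-docs`, `pages-devdocs`) that consume it via `needs`. A minimal sketch of this job-outputs pattern, with illustrative job and output names:

```yaml
# Hypothetical two-job workflow showing how a value flows between jobs.
jobs:
  pre:
    runs-on: ubuntu-latest
    steps:
      - id: vars
        # Write a step output; $GITHUB_OUTPUT is the file GitHub Actions reads back.
        run: echo "branch-version=v1.0" >> "$GITHUB_OUTPUT"
    outputs:
      # Promote the step output to a job output so other jobs can read it.
      branch-version: ${{ steps.vars.outputs.branch-version }}
  consumer:
    runs-on: ubuntu-latest
    needs:
      - pre                                  # dependency edge; also exposes pre's outputs
    steps:
      - run: echo "deploying ${{ needs.pre.outputs.branch-version }}"
```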
.github/workflows/latest_deps.yml (14 changes, vendored)

@@ -27,9 +27,7 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Install Rust
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: stable
+       uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      # The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -61,9 +59,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: stable
+       uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      - run: sudo apt-get -qq install xmlsec1
@@ -134,9 +130,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: stable
+       uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      - name: Ensure sytest runs `pip install`
@@ -184,6 +178,8 @@ jobs:
        with:
          path: synapse

+     - uses: actions/setup-go@v4
+
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh
.github/workflows/release-artifacts.yml (4 changes, vendored)

@@ -4,13 +4,15 @@ name: Build release artifacts
on:
  # we build on PRs and develop to (hopefully) get early warning
- # of things breaking (but only build one set of debs)
+ # of things breaking (but only build one set of debs). PRs skip
+ # building wheels on macOS & ARM.
  pull_request:
  push:
    branches: ["develop", "release-*"]

    # we do the full build on tags.
    tags: ["v*"]
+ merge_group:
  workflow_dispatch:

concurrency:
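The `concurrency:` block above is truncated in this view. For reference, the shape used elsewhere in this same changeset (see the `twisted_trunk.yml` hunk further down) groups runs per workflow and ref; the `cancel-in-progress` line is a common companion setting and an assumption here, not something shown in this diff:

```yaml
# Sketch of a typical concurrency block; cancel-in-progress is assumed, not from this diff.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}  # one group per workflow + branch/tag
  cancel-in-progress: true                         # supersede older runs in the same group
```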
.github/workflows/tests.yml (160 changes, vendored)

@@ -4,6 +4,7 @@ on:
  push:
    branches: ["develop", "release-*"]
  pull_request:
+ merge_group:
  workflow_dispatch:

concurrency:
@@ -33,6 +34,9 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
+     - name: Install Rust
+       uses: dtolnay/rust-toolchain@1.58.1
+     - uses: Swatinem/rust-cache@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
@@ -61,9 +65,60 @@ jobs:
      - run: .ci/scripts/check_lockfile.py

  lint:
-   uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2"
-   with:
-     typechecking-extras: "all"
+   runs-on: ubuntu-latest
+   steps:
+     - name: Checkout repository
+       uses: actions/checkout@v3
+
+     - name: Setup Poetry
+       uses: matrix-org/setup-python-poetry@v1
+       with:
+         install-project: "false"
+
+     - name: Import order (isort)
+       run: poetry run isort --check --diff .
+
+     - name: Code style (black)
+       run: poetry run black --check --diff .
+
+     - name: Semantic checks (ruff)
+       # --quiet suppresses the update check.
+       run: poetry run ruff --quiet .
+
+ lint-mypy:
+   runs-on: ubuntu-latest
+   name: Typechecking
+   steps:
+     - name: Checkout repository
+       uses: actions/checkout@v3
+
+     - name: Setup Poetry
+       uses: matrix-org/setup-python-poetry@v1
+       with:
+         # We want to make use of type hints in optional dependencies too.
+         extras: all
+         # We have seen odd mypy failures that were resolved when we started
+         # installing the project again:
+         # https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
+         # To make CI green, err towards caution and install the project.
+         install-project: "true"
+
+     - name: Install Rust
+       uses: dtolnay/rust-toolchain@1.58.1
+     - uses: Swatinem/rust-cache@v2
+
+     # Cribbed from
+     # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
+     - name: Restore/persist mypy's cache
+       uses: actions/cache@v3
+       with:
+         path: |
+           .mypy_cache
+         key: mypy-cache-${{ github.context.sha }}
+         restore-keys: mypy-cache-
+
+     - name: Run mypy
+       run: poetry run mypy

  lint-crlf:
    runs-on: ubuntu-latest
@@ -94,6 +149,9 @@ jobs:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha }}
+     - name: Install Rust
+       uses: dtolnay/rust-toolchain@1.58.1
+     - uses: Swatinem/rust-cache@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          poetry-version: "1.3.2"
@@ -109,12 +167,8 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+       uses: dtolnay/rust-toolchain@1.58.1
        with:
-         toolchain: 1.58.1
          components: clippy
      - uses: Swatinem/rust-cache@v2
@@ -131,10 +185,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+       uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2022-12-01
          components: clippy
@@ -151,10 +202,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+       uses: dtolnay/rust-toolchain@master
        with:
          # We use nightly so that it correctly groups together imports
          toolchain: nightly-2022-12-01
@@ -168,6 +216,7 @@ jobs:
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    needs:
      - lint
+     - lint-mypy
      - lint-crlf
      - lint-newsfile
      - lint-pydantic
@@ -219,12 +268,7 @@ jobs:
            postgres:${{ matrix.job.postgres-version }}

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: 1.58.1
+       uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1
@@ -264,12 +308,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: 1.58.1
+       uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

      # There aren't wheels for some of the older deps, so we need to install
@@ -282,34 +321,25 @@ jobs:
        with:
          python-version: '3.7'

      # Calculating the old-deps actually takes a bunch of time, so we cache the
      # pyproject.toml / poetry.lock. We need to cache pyproject.toml as
      # otherwise the `poetry install` step will error due to the poetry.lock
      # file being outdated.
      #
      # This caches the output of `Prepare old deps`, which should generate the
      # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
      - uses: actions/cache@v3
        id: cache-poetry-old-deps
        name: Cache poetry.lock
        with:
          path: |
            poetry.lock
            pyproject.toml
          key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
      - name: Prepare old deps
        if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
        run: .ci/scripts/prepare_old_deps.sh

      # We only now install poetry so that `setup-python-poetry` caches the
      # right poetry.lock's dependencies.
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: '3.7'
          poetry-version: "1.3.2"
          extras: "all test"
      # Note: we install using `pip` here, not poetry. `poetry install` ignores the
      # build-system section (https://github.com/python-poetry/poetry/issues/6154), but
      # we explicitly want to test that you can `pip install` using the oldest version
      # of poetry-core and setuptools-rust.
      - run: pip install .[all,test]

      - run: poetry run trial -j6 tests
      # We nuke the local copy, as we've installed synapse into the virtualenv
      # (rather than use an editable install, which we no longer support). If we
      # don't do this then python can't find the native lib.
      - run: rm -rf synapse/

      # Sanity check we can import/run Synapse
      - run: python -m synapse.app.homeserver --help

      - run: python -m twisted.trial -j6 tests
      - name: Dump logs
        # Logs are most useful when the command fails, always include them.
        if: ${{ always() }}
@@ -385,12 +415,7 @@ jobs:
        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: 1.58.1
+       uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

      - name: Run SyTest
@@ -530,14 +555,11 @@ jobs:
          path: synapse

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: 1.58.1
+       uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

+     - uses: actions/setup-go@v4
+
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -561,12 +583,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: 1.58.1
+       uses: dtolnay/rust-toolchain@1.58.1
      - uses: Swatinem/rust-cache@v2

      - run: cargo test
@@ -584,10 +601,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       # There don't seem to be versioned releases of this action per se: for each rust
-       # version there is a branch which gets constantly rebased on top of master.
-       # We pin to a specific commit for paranoia's sake.
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+       uses: dtolnay/rust-toolchain@master
        with:
          toolchain: nightly-2022-12-01
      - uses: Swatinem/rust-cache@v2
.github/workflows/twisted_trunk.yml (23 changes, vendored)

@@ -5,6 +5,13 @@ on:
    - cron: 0 8 * * *

  workflow_dispatch:
+   inputs:
+     twisted_ref:
+       description: Commit, branch or tag to checkout from upstream Twisted.
+       required: false
+       default: 'trunk'
+       type: string

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -18,9 +25,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: stable
+       uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1
@@ -29,7 +34,7 @@ jobs:
          extras: "all"
      - run: |
          poetry remove twisted
-         poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+         poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
          poetry install --no-interaction --extras "all test"
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
@@ -43,9 +48,7 @@ jobs:
      - run: sudo apt-get -qq install xmlsec1

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: stable
+       uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      - uses: matrix-org/setup-python-poetry@v1
@@ -82,9 +85,7 @@ jobs:
      - uses: actions/checkout@v3

      - name: Install Rust
-       uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
-       with:
-         toolchain: stable
+       uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      - name: Patch dependencies
@@ -140,6 +141,8 @@ jobs:
        with:
          path: synapse

+     - uses: actions/setup-go@v4
+
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh
.gitignore (9 changes, vendored)

@@ -15,9 +15,10 @@ _trial_temp*/
.DS_Store
__pycache__/

- # We do want the poetry and cargo lockfile.
+ # We do want poetry, cargo and flake lockfiles.
!poetry.lock
!Cargo.lock
+ !flake.lock

# stuff that is likely to exist when you run a server locally
/*.db
@@ -38,6 +39,9 @@ __pycache__/
/.envrc
.direnv/

+ # For nix/devenv users
+ .devenv/
+
# IDEs
/.idea/
/.ropeproject/
@@ -53,6 +57,7 @@ __pycache__/
/coverage.*
/dist/
/docs/build/
+ /dev-docs/_build/
/htmlcov
/pip-wheel-metadata/
@@ -61,7 +66,7 @@ book/
# complement
/complement-*
- /master.tar.gz
+ /main.tar.gz

# rust
/target/
CHANGES.md (440 changes)

@@ -1,3 +1,439 @@
Synapse 1.84.0 (2023-05-23)
===========================

The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information.
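A hedged sketch of what the replacement configuration might look like: the main process gains its own entry in `instance_map`, alongside any workers. Host, port, and worker names below are illustrative; consult the linked upgrade notes for the authoritative format.

```yaml
# Illustrative shared configuration: the main process is declared in
# instance_map instead of per-worker worker_replication_* settings.
instance_map:
  main:
    host: localhost
    port: 8034          # example replication listener port on the main process
  generic_worker1:
    host: localhost
    port: 8035          # example worker; names and ports are placeholders
```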
Bugfixes
--------

- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599))


Synapse 1.84.0rc1 (2023-05-16)
==============================

Features
--------

- Add an option to prevent media downloads from configured domains (see the sketch after this list). ([\#15197](https://github.com/matrix-org/synapse/issues/15197))
- Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. ([\#15224](https://github.com/matrix-org/synapse/issues/15224))
- Add redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312))
- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. ([\#15516](https://github.com/matrix-org/synapse/issues/15516))
- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528))
- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536))
- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559))
- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569))
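A hedged sketch of the media-download blocking option from the first feature entry above (#15197); the option name follows that changelog entry, and the domain values are placeholders:

```yaml
# Illustrative homeserver.yaml snippet: refuse to fetch remote media from
# the listed server names. Domain values are placeholders.
prevent_media_downloads_from:
  - evil.example.org
  - ads.example.net
```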
Bugfixes
--------

- Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523))
- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555))
- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564))
- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571))


Deprecations and Removals
-------------------------

- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491))


Updates to the Docker image
---------------------------

- Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567))


Improved Documentation
----------------------

- Clarify documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544))
- Fix path to the `statistics/database/rooms` admin API in documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560))
- Update and improve Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587))


Internal Changes
----------------

- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025))
- Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470))
- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509))
- Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522))
- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527))
- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529))
- Speed up deleting of old rows in `event_push_actions`. ([\#15531](https://github.com/matrix-org/synapse/issues/15531))
- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545))
- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534))
- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535))
- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539))
- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542))
- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543))
- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548))
- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549))
- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550))
- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551))
- Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552))
- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553))
- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558))
- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562))
- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563))
- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565))
- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570))
- Deal with upcoming Github Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576))
- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577))
- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588))
- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589))
- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590))
- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. ([\#15591](https://github.com/matrix-org/synapse/issues/15591))
- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592))
- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594))
Synapse 1.83.0 (2023-05-09)
===========================

No significant changes since 1.83.0rc1.


Synapse 1.83.0rc1 (2023-05-02)
==============================

Features
--------

- Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315))
- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): Scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318))
- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344))
- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387))
- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482))


Bugfixes
--------

- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361))
- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. ([\#15417](https://github.com/matrix-org/synapse/issues/15417))
- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494))


Improved Documentation
----------------------

- Add Nginx loadbalancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411))
- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498))


Internal Changes
----------------

- Speedup tests by caching HomeServerConfig instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284))
- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356))
- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418))
- Add column `full_user_id` to tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458))
- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462))
- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497))
- Support claiming more than one OTK at a time. ([\#15468](https://github.com/matrix-org/synapse/issues/15468))
- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471))
- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473))
- Bump cryptography from 40.0.1 to 40.0.2. ([\#15474](https://github.com/matrix-org/synapse/issues/15474))
- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475))
- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476))
- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479))
- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495))
- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507))
- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508))
- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510))
- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511))
- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512))
- Update the check_schema_delta script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466))


Synapse 1.82.0 (2023-04-25)
===========================

No significant changes since 1.82.0rc1.


Synapse 1.82.0rc1 (2023-04-18)
==============================

Features
--------

- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. ([\#15333](https://github.com/matrix-org/synapse/issues/15333))
- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431))
- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436))


Bugfixes
--------

- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181))
- Fix and document untold assumption that `on_logged_out` module hooks will be called before the deletion of pushers. ([\#15410](https://github.com/matrix-org/synapse/issues/15410))
- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423))
- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. ([\#15425](https://github.com/matrix-org/synapse/issues/15425))
- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428))


Improved Documentation
----------------------

- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452))


Deprecations and Removals
-------------------------

- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405))


Internal Changes
----------------

- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372))
- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373))
- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374))
- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375))
- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376))
- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404))
- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412))
- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413))
- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414))
- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415))
- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441))
- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442))
- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443))
- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444))
- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445))
- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446))
- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447))
- Bump psycopg2 from 2.9.5 to 2.9.6. ([\#15448](https://github.com/matrix-org/synapse/issues/15448))
- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429))
- Implement [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393))
- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. ([\#15394](https://github.com/matrix-org/synapse/issues/15394))
- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property. ([\#15395](https://github.com/matrix-org/synapse/issues/15395))
- Trust dtolnay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406))
- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409))
- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421))
- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427))
- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432))
- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433))
- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435))
- Disable directory listing for static resources in `/_matrix/static/`. ([\#15438](https://github.com/matrix-org/synapse/issues/15438))
- Move various module API callback registration methods to a dedicated class. ([\#15453](https://github.com/matrix-org/synapse/issues/15453))
Synapse 1.81.0 (2023-04-11)
===========================

Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.

Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.

A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.


No significant changes since 1.81.0rc2.


Synapse 1.81.0rc2 (2023-04-06)
==============================

Bugfixes
--------

- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391))


Internal Changes
----------------

- Update CI to run complement under the latest stable go version. ([\#15403](https://github.com/matrix-org/synapse/issues/15403))


Synapse 1.81.0rc1 (2023-04-04)
==============================

Features
--------

- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
- Allow loading `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
- Add experimental support for Unix sockets. Contributed by Jason Little. ([\#15353](https://github.com/matrix-org/synapse/issues/15353))
- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))


Bugfixes
--------

- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
- Add a check to [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
  to ensure that the sqlite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
- Fix a long-standing bug that Synapse only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
- Fix bug in worker mode where on a rolling restart of workers the "typing" worker would consume 100% CPU until it got restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
- Fix a long-standing bug where some to_device messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
- Fix missing app variable in mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))


Improved Documentation
----------------------

- Fix a typo in login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
- Add some clarification to the doc/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))


Internal Changes
----------------

- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
- Bump sentry-sdk from 1.15.0 to 1.17.0. ([\#15285](https://github.com/matrix-org/synapse/issues/15285))
- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302))
- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303))
- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304))
- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311))
- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316))
- Fix `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319))
- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324))
- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325))
- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326))
- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327))
- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328))
- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329))
- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330))
- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334))
- Speed up pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339))
- Speed up sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340))
- Fix copyright year in SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358))
- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369))
- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370))
- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371))
- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385))
Synapse 1.80.0 (2023-03-28)
===========================

No significant changes since 1.80.0rc2.


Synapse 1.80.0rc2 (2023-03-22)
==============================

Bugfixes
--------

- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300))
- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301))


Synapse 1.80.0rc1 (2023-03-21)
==============================

Features
--------

- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187))
- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249))
- Allow loading the `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268))
- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195))
- Add topic and name events to the group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229))

Bugfixes
--------

- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756))
- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190))
- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232))
- Fix a long-standing error when sending a message into a deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235))

Updates to the Docker image
---------------------------

- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239))
- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282))


Improved Documentation
----------------------

- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223))

Internal Changes
----------------

- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921))
- Add `Synapse-Trace-Id` to the `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974))
- Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200))
- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222))
- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238))
- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237))
- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244))
- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247))
- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262))
- Remove the unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266))
- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269))
- Clean up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274))
- Make the `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275))
- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293))
- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252))
- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253))
- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254))
- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255))
- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256))
- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257))
- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286))
- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287))
- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288))
- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289))
- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290))
- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291))


Synapse 1.79.0 (2023-03-14)
===========================

No significant changes since 1.79.0rc2.


Synapse 1.79.0rc2 (2023-03-13)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register an `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))


Internal Changes
----------------

- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))

Synapse 1.79.0rc1 (2023-03-07)
==============================

@@ -47,7 +483,7 @@ Improved Documentation

Deprecations and Removals
-------------------------

-- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044]
+- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
@@ -288,7 +724,7 @@ Those who are `poetry install`ing from source using our lockfile should ensure t

Notes on faster joins
---------------------

The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.

After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
Cargo.lock (generated): 49 changes
@@ -13,9 +13,9 @@ dependencies = [

[[package]]
name = "anyhow"
-version = "1.0.69"
+version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"

[[package]]
name = "arc-swap"

@@ -185,9 +185,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
-version = "1.0.46"
+version = "1.0.52"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
+checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
dependencies = [
 "unicode-ident",
]

@@ -250,7 +250,7 @@ dependencies = [
 "proc-macro2",
 "pyo3-macros-backend",
 "quote",
- "syn",
+ "syn 1.0.104",
]

[[package]]

@@ -261,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.104",
]

[[package]]

@@ -276,9 +276,9 @@ dependencies = [

[[package]]
name = "quote"
-version = "1.0.21"
+version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
dependencies = [
 "proc-macro2",
]

@@ -294,9 +294,9 @@ dependencies = [

[[package]]
name = "regex"
-version = "1.7.1"
+version = "1.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
+checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -305,9 +305,9 @@ dependencies = [

[[package]]
name = "regex-syntax"
-version = "0.6.27"
+version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"

[[package]]
name = "ryu"

@@ -323,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
-version = "1.0.152"
+version = "1.0.163"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
-version = "1.0.152"
+version = "1.0.163"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 2.0.10",
]

[[package]]
name = "serde_json"
-version = "1.0.94"
+version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea"
+checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
dependencies = [
 "itoa",
 "ryu",

@@ -375,6 +375,17 @@ dependencies = [
 "unicode-ident",
]

+[[package]]
+name = "syn"
+version = "2.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
[[package]]
name = "synapse"
version = "0.1.0"
@@ -1 +0,0 @@
-Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition.
@@ -1 +0,0 @@
-Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules.
@@ -1 +0,0 @@
-Improve performance of creating and authenticating events.
@@ -1 +0,0 @@
-Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key.
@@ -1 +0,0 @@
-Add a missing endpoint to the workers documentation.
@@ -70,6 +70,10 @@ redis:
   port: 6379
   # dbid: <redis_logical_db_id>
   # password: <secret_password>
+  # use_tls: True
+  # certificate_file: <path_to_certificate>
+  # private_key_file: <path_to_private_key>
+  # ca_file: <path_to_ca_certificate>
```

This assumes that your Redis service is called `redis` in your Docker Compose file.
debian/changelog (vendored): 84 changes
@@ -1,3 +1,87 @@
+matrix-synapse-py3 (1.84.0) stable; urgency=medium
+
+  * New Synapse release 1.84.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 23 May 2023 10:57:22 +0100
+
+matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.84.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 16 May 2023 11:12:02 +0100
+
+matrix-synapse-py3 (1.83.0) stable; urgency=medium
+
+  * New Synapse release 1.83.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 09 May 2023 18:13:37 +0200
+
+matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.83.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 May 2023 15:56:38 +0100
+
+matrix-synapse-py3 (1.82.0) stable; urgency=medium
+
+  * New Synapse release 1.82.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Apr 2023 11:56:06 +0100
+
+matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.82.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Apr 2023 09:47:30 +0100
+
+matrix-synapse-py3 (1.81.0) stable; urgency=medium
+
+  * New Synapse release 1.81.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Apr 2023 14:18:35 +0100
+
+matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.81.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 06 Apr 2023 16:07:54 +0100
+
+matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.81.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Apr 2023 14:29:03 +0100
+
+matrix-synapse-py3 (1.80.0) stable; urgency=medium
+
+  * New Synapse release 1.80.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Mar 2023 11:10:33 +0100
+
+matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.80.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 22 Mar 2023 08:30:16 -0700
+
+matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.80.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Mar 2023 10:56:08 -0700
+
+matrix-synapse-py3 (1.79.0) stable; urgency=medium
+
+  * New Synapse release 1.79.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Mar 2023 16:14:50 +0100
+
+matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.79.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 13 Mar 2023 12:54:21 +0000
+
matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium

  * New Synapse release 1.79.0rc1.
@@ -46,7 +46,7 @@ for port in 8080 8081 8082; do
    echo ''

    # Warning, this heredoc depends on the interaction of tabs and spaces.
-   # Please don't accidentaly bork me with your fancy settings.
+   # Please don't accidentally bork me with your fancy settings.
    listeners=$(cat <<-PORTLISTENERS
    # Configure server to listen on both $https_port and $port
    # This overides some of the default settings above
@@ -80,12 +80,8 @@ for port in 8080 8081 8082; do
    echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
    echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""

-   # Ignore keys from the trusted keys server
-   echo '# Ignore keys from the trusted keys server'
-   echo 'trusted_key_servers:'
-   echo ' - server_name: "matrix.org"'
-   echo '   accept_keys_insecurely: true'
-   echo ''
+   # Request keys directly from servers contacted over federation
+   echo 'trusted_key_servers: []'

    # Allow the servers to communicate over localhost.
    allow_list=$(cat <<-ALLOW_LIST
dev-docs/Makefile (new file): 20 lines
@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
dev-docs/conf.py (new file): 50 lines
@@ -0,0 +1,50 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "Synapse development"
copyright = "2023, The Matrix.org Foundation C.I.C."
author = "The Synapse Maintainers and Community"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "autodoc2",
    "myst_parser",
]

templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]


# -- Options for Autodoc2 ----------------------------------------------------

autodoc2_docstring_parser_regexes = [
    # this will render all docstrings as 'MyST' Markdown
    (r".*", "myst"),
]

autodoc2_packages = [
    {
        "path": "../synapse",
        # Don't render documentation for everything as a matter of course
        "auto_mode": False,
    },
]


# -- Options for MyST (Markdown) ---------------------------------------------

# myst_heading_anchors = 2


# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "furo"
html_static_path = ["_static"]
dev-docs/index.rst (new file): 22 lines
@@ -0,0 +1,22 @@
.. Synapse Developer Documentation documentation master file, created by
   sphinx-quickstart on Mon Mar 13 08:59:51 2023.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to the Synapse Developer Documentation!
===========================================================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   modules/federation_sender


Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
dev-docs/modules/federation_sender.md (new file): 5 lines
@@ -0,0 +1,5 @@
Federation Sender
=================

```{autodoc2-docstring} synapse.federation.sender
```
@@ -37,9 +37,24 @@ RUN \
  --mount=type=cache,target=/var/cache/apt,sharing=locked \
  --mount=type=cache,target=/var/lib/apt,sharing=locked \
  apt-get update -qq && apt-get install -yqq \
-      build-essential git libffi-dev libssl-dev \
+      build-essential curl git libffi-dev libssl-dev pkg-config \
      && rm -rf /var/lib/apt/lists/*

+# Install rust and ensure its in the PATH.
+# (Rust may be needed to compile `cryptography`---which is one of poetry's
+# dependencies---on platforms that don't have a `cryptography` wheel.
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
@@ -51,8 +51,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
  # -z True if the length of string is zero.
  if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
    export SYNAPSE_WORKER_TYPES="\
-      event_persister, \
-      event_persister, \
+      event_persister:2, \
      background_worker, \
      frontend_proxy, \
      event_creator, \
@@ -64,7 +63,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
      synchrotron, \
      client_reader, \
      appservice, \
-      pusher"
+      pusher, \
+      stream_writers=account_data+presence+receipts+to_device+typing"

  fi
  log "Workers requested: $SYNAPSE_WORKER_TYPES"
@@ -6,10 +6,6 @@
worker_app: "{{ app }}"
worker_name: "{{ name }}"

-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_listeners:
  - type: http
    port: {{ port }}
@@ -19,8 +19,15 @@
# The environment variables it reads are:
#   * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
#   * SYNAPSE_REPORT_STATS: Whether to report stats.
-#   * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-#         below. Leave empty for no workers.
+#   * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
+#         below. Leave empty for no workers. Add a ':' and a number at the end to
+#         multiply that worker. Append multiple worker types with '+' to merge the
+#         worker types into a single worker. Add a name and a '=' to the front of a
+#         worker type to give this instance a name in logs and nginx.
+#         Examples:
+#         SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
+#         SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
+#         SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
#   * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
#         will be treated as Application Service registration files.
#   * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -40,15 +47,35 @@

import os
import platform
+import re
import subprocess
import sys
+from collections import defaultdict
+from itertools import chain
from pathlib import Path
-from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
+from typing import (
+    Any,
+    Dict,
+    List,
+    Mapping,
+    MutableMapping,
+    NoReturn,
+    Optional,
+    Set,
+    SupportsIndex,
+)

import yaml
from jinja2 import Environment, FileSystemLoader

MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
+MAIN_PROCESS_INSTANCE_NAME = "main"
+MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
+MAIN_PROCESS_REPLICATION_PORT = 9093
+
+# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
+# during processing with the name of the worker.
+WORKER_PLACEHOLDER_NAME = "placeholder_name"

# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
@@ -70,11 +97,13 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
        ],
-        "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
+        "shared_extra_conf": {
+            "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
+        },
        "worker_extra_conf": "",
    },
    "media_repository": {
-        "app": "synapse.app.media_repository",
+        "app": "synapse.app.generic_worker",
        "listener_resources": ["media"],
        "endpoint_patterns": [
            "^/_matrix/media/",
@@ -87,7 +116,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        # The first configured media worker will run the media background jobs
        "shared_extra_conf": {
            "enable_media_repo": False,
-            "media_instance_running_background_jobs": "media_repository1",
+            "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
        },
        "worker_extra_conf": "enable_media_repo: true",
    },
@@ -95,7 +124,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
+        "shared_extra_conf": {
+            "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
+        },
        "worker_extra_conf": "",
    },
    "federation_sender": {
@@ -135,6 +166,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_matrix/client/versions$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
            "^/_matrix/client/(r0|v3|unstable)/register$",
+            "^/_matrix/client/(r0|v3|unstable)/register/available$",
            "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
@@ -143,6 +175,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
            "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
+            "^/_matrix/client/(r0|v3|unstable)/password_policy$",
+            "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
+            "^/_matrix/client/(r0|v3|unstable)/capabilities$",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
@@ -192,9 +227,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
-        # This worker cannot be sharded. Therefore there should only ever be one background
-        # worker, and it should be named background_worker1
-        "shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
+        # This worker cannot be sharded. Therefore, there should only ever be one
+        # background worker. This is enforced for the safety of your database.
+        "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
        "worker_extra_conf": "",
    },
    "event_creator": {
@@ -275,7 +310,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
"""

NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {{
+upstream {upstream_worker_base_name} {{
{body}
}}
"""
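
For context, here is a minimal sketch of how the renamed upstream template renders. This is illustrative only and not part of the diff; the worker base name and port numbers are made up:

```python
# A copy of the template above, assuming it is unchanged apart from
# the {upstream_worker_base_name} rename.
NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
{body}
}}
"""

# Two hypothetical client_reader instances behind a single upstream.
print(
    NGINX_UPSTREAM_CONFIG_BLOCK.format(
        upstream_worker_base_name="client_reader",
        body="    server localhost:18009;\n    server localhost:18010;",
    )
)
# upstream client_reader {
#     server localhost:18009;
#     server localhost:18010;
# }
```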
@@ -326,7 +361,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:

def add_worker_roles_to_shared_config(
    shared_config: dict,
-    worker_type: str,
+    worker_types_set: Set[str],
    worker_name: str,
    worker_port: int,
) -> None:
@@ -334,22 +369,36 @@
    append appropriate worker information to it for the current worker_type instance.

    Args:
-        shared_config: The config dict that all worker instances share (after being converted to YAML)
-        worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
+        shared_config: The config dict that all worker instances share (after being
+            converted to YAML)
+        worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
+            This list can be a single worker type or multiple.
        worker_name: The name of the worker instance.
        worker_port: The HTTP replication port that the worker instance is listening on.
    """
-    # The instance_map config field marks the workers that write to various replication streams
+    # The instance_map config field marks the workers that write to various replication
+    # streams
    instance_map = shared_config.setdefault("instance_map", {})

-    # Worker-type specific sharding config
-    if worker_type == "pusher":
+    # This is a list of the stream_writers that there can be only one of. Events can be
+    # sharded, and therefore doesn't belong here.
+    singular_stream_writers = [
+        "account_data",
+        "presence",
+        "receipts",
+        "to_device",
+        "typing",
+    ]
+
+    # Worker-type specific sharding config. Now a single worker can fulfill multiple
+    # roles, check each.
+    if "pusher" in worker_types_set:
        shared_config.setdefault("pusher_instances", []).append(worker_name)

-    elif worker_type == "federation_sender":
+    if "federation_sender" in worker_types_set:
        shared_config.setdefault("federation_sender_instances", []).append(worker_name)

-    elif worker_type == "event_persister":
+    if "event_persister" in worker_types_set:
        # Event persisters write to the events stream, so we need to update
        # the list of event stream writers
        shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
@@ -362,19 +411,154 @@
            "port": worker_port,
        }

-    elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
-        # Update the list of stream writers
-        # It's convenient that the name of the worker type is the same as the stream to write
-        shared_config.setdefault("stream_writers", {}).setdefault(
-            worker_type, []
-        ).append(worker_name)
+    # Update the list of stream writers. It's convenient that the name of the worker
+    # type is the same as the stream to write. Iterate over the whole list in case there
+    # is more than one.
+    for worker in worker_types_set:
+        if worker in singular_stream_writers:
+            shared_config.setdefault("stream_writers", {}).setdefault(
+                worker, []
+            ).append(worker_name)

-        # Map of stream writer instance names to host/ports combos
-        # For now, all stream writers need http replication ports
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+            # Map of stream writer instance names to host/ports combos
+            # For now, all stream writers need http replication ports
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }
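
To make the new multi-role behaviour concrete, here is a hypothetical walk-through (not part of the diff) of what the shared config ends up holding for a single worker combining the `pusher` and `typing` roles, assuming the logic above:

```python
shared_config: dict = {}
instance_map = shared_config.setdefault("instance_map", {})

worker_name, worker_port = "pusher+typing1", 18009  # invented example values
worker_types_set = {"pusher", "typing"}

# The "pusher" role adds this instance to pusher_instances.
if "pusher" in worker_types_set:
    shared_config.setdefault("pusher_instances", []).append(worker_name)

# "typing" is a singular stream writer: record it as the typing stream
# writer and give the worker an entry in the instance map.
for worker in worker_types_set:
    if worker in {"account_data", "presence", "receipts", "to_device", "typing"}:
        shared_config.setdefault("stream_writers", {}).setdefault(
            worker, []
        ).append(worker_name)
        instance_map[worker_name] = {"host": "localhost", "port": worker_port}

# shared_config is now:
# {
#     "instance_map": {"pusher+typing1": {"host": "localhost", "port": 18009}},
#     "pusher_instances": ["pusher+typing1"],
#     "stream_writers": {"typing": ["pusher+typing1"]},
# }
```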
+def merge_worker_template_configs(
+    existing_dict: Optional[Dict[str, Any]],
+    to_be_merged_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+    """When given an existing dict of worker template configuration consisting with both
+    dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
+    return new dict.
+
+    Args:
+        existing_dict: Either an existing worker template or a fresh blank one.
+        to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
+            existing_dict.
+    Returns: The newly merged together dict values.
+    """
+    new_dict: Dict[str, Any] = {}
+    if not existing_dict:
+        # It doesn't exist yet, just use the new dict(but take a copy not a reference)
+        new_dict = to_be_merged_dict.copy()
+    else:
+        for i in to_be_merged_dict.keys():
+            if (i == "endpoint_patterns") or (i == "listener_resources"):
+                # merge the two lists, remove duplicates
+                new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
+            elif i == "shared_extra_conf":
+                # merge dictionary's, the worker name will be replaced later
+                new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
+            elif i == "worker_extra_conf":
+                # There is only one worker type that has a 'worker_extra_conf' and it is
+                # the media_repo. Since duplicate worker types on the same worker don't
+                # work, this is fine.
+                new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
+            else:
+                # Everything else should be identical, like "app", which only works
+                # because all apps are now generic_workers.
+                new_dict[i] = to_be_merged_dict[i]
+    return new_dict
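
A quick hypothetical example of the merge rules above, assuming `merge_worker_template_configs` as defined (the endpoint patterns here are invented for illustration): list fields are unioned and deduplicated, `shared_extra_conf` dicts are merged, and `worker_extra_conf` strings are concatenated.

```python
existing = {
    "app": "synapse.app.generic_worker",
    "listener_resources": ["client"],
    "endpoint_patterns": ["^/_matrix/client/v3/sync$"],  # invented pattern
    "shared_extra_conf": {},
    "worker_extra_conf": "",
}
incoming = {
    "app": "synapse.app.generic_worker",
    "listener_resources": ["client", "federation"],
    "endpoint_patterns": ["^/_matrix/federation/v1/send/"],  # invented pattern
    "shared_extra_conf": {"notify_appservices_from_worker": "placeholder_name"},
    "worker_extra_conf": "",
}

merged = merge_worker_template_configs(existing, incoming)
# merged["listener_resources"] -> ["client", "federation"] (set union, order not guaranteed)
# merged["endpoint_patterns"]  -> both patterns, deduplicated
# merged["shared_extra_conf"]  -> {"notify_appservices_from_worker": "placeholder_name"}
# merged["worker_extra_conf"]  -> "" ("" + "")
```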
+def insert_worker_name_for_worker_config(
+    existing_dict: Dict[str, Any], worker_name: str
+) -> Dict[str, Any]:
+    """Insert a given worker name into the worker's configuration dict.
+
+    Args:
+        existing_dict: The worker_config dict that is imported into shared_config.
+        worker_name: The name of the worker to insert.
+    Returns: Copy of the dict with newly inserted worker name
+    """
+    dict_to_edit = existing_dict.copy()
+    for k, v in dict_to_edit["shared_extra_conf"].items():
+        # Only proceed if it's the placeholder name string
+        if v == WORKER_PLACEHOLDER_NAME:
+            dict_to_edit["shared_extra_conf"][k] = worker_name
+    return dict_to_edit
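
A small hypothetical usage sketch, assuming the function and `WORKER_PLACEHOLDER_NAME` above. Note that `.copy()` is shallow, so the nested `shared_extra_conf` mapping is still shared with the input dict; the caller gets away with this because it merges fresh dicts first.

```python
# Invented template for illustration.
template = {
    "shared_extra_conf": {"notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME},
}
resolved = insert_worker_name_for_worker_config(template, "appservice1")
# resolved["shared_extra_conf"]
#   -> {"notify_appservices_from_worker": "appservice1"}
```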
+def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
+    """
+    Apply multiplier(if found) by returning a new expanded list with some basic error
+    checking.
+
+    Args:
+        worker_types: The unprocessed List of requested workers
+    Returns:
+        A new list with all requested workers expanded.
+    """
+    # Checking performed:
+    # 1. if worker:2 or more is declared, it will create additional workers up to number
+    # 2. if worker:1, it will create a single copy of this worker as if no number was
+    #    given
+    # 3. if worker:0 is declared, this worker will be ignored. This is to allow for
+    #    scripting and automated expansion and is intended behaviour.
+    # 4. if worker:NaN or is a negative number, it will error and log it.
+    new_worker_types = []
+    for worker_type in worker_types:
+        if ":" in worker_type:
+            worker_type_components = split_and_strip_string(worker_type, ":", 1)
+            worker_count = 0
+            # Should only be 2 components, a type of worker(s) and an integer as a
+            # string. Cast the number as an int then it can be used as a counter.
+            try:
+                worker_count = int(worker_type_components[1])
+            except ValueError:
+                error(
+                    f"Bad number in worker count for '{worker_type}': "
+                    f"'{worker_type_components[1]}' is not an integer"
+                )
+
+            # As long as there are more than 0, we add one to the list to make below.
+            for _ in range(worker_count):
+                new_worker_types.append(worker_type_components[0])
+
+        else:
+            # If it's not a real worker_type, it will error out later.
+            new_worker_types.append(worker_type)
+    return new_worker_types
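
For illustration, a hypothetical expansion assuming the function above; note that a `:0` multiplier drops the worker entirely, which the comments call out as intended:

```python
apply_requested_multiplier_for_worker(
    ["client_reader:2", "background_worker", "pusher:0"]
)
# -> ["client_reader", "client_reader", "background_worker"]
```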
+def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
+    """Helper to check to make sure worker types that cannot have multiples do not.
+
+    Args:
+        worker_type: The type of worker to check against.
+    Returns: True if allowed, False if not
+    """
+    return worker_type not in [
+        "background_worker",
+        "account_data",
+        "presence",
+        "receipts",
+        "typing",
+        "to_device",
+    ]
+def split_and_strip_string(
+    given_string: str, split_char: str, max_split: SupportsIndex = -1
+) -> List[str]:
+    """
+    Helper to split a string on split_char and strip whitespace from each end of each
+    element.
+    Args:
+        given_string: The string to split
+        split_char: The character to split the string on
+        max_split: kwarg for split() to limit how many times the split() happens
+    Returns:
+        A List of strings
+    """
+    # Removes whitespace from ends of result strings before adding to list. Allow for
+    # overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
+    return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
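
Two hypothetical calls, assuming the helper above:

```python
split_and_strip_string(" writers = typing + to_device ", "=", 1)
# -> ["writers", "typing + to_device"]

split_and_strip_string("typing + to_device", "+")
# -> ["typing", "to_device"]
```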
+
+
def generate_base_homeserver_config() -> None:
@@ -389,33 +573,157 @@ def generate_base_homeserver_config() -> None:
    subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)

+
+def parse_worker_types(
+    requested_worker_types: List[str],
+) -> Dict[str, Set[str]]:
+    """Read the desired list of requested workers and prepare the data for use in
+    generating worker config files while also checking for potential gotchas.
+
+    Args:
+        requested_worker_types: The list formed from the split environment variable
+            containing the unprocessed requests for workers.
+
+    Returns: A dict of worker names to set of worker types. Format:
+        {'worker_name':
+            {'worker_type', 'worker_type2'}
+        }
+    """
+    # A counter of worker_base_name -> int. Used for determining the name for a given
+    # worker when generating its config file, as each worker's name is just
+    # worker_base_name followed by instance number
+    worker_base_name_counter: Dict[str, int] = defaultdict(int)
+
+    # Similar to above, but more finely grained. This is used to determine we don't have
+    # more than a single worker for cases where multiples would be bad(e.g. presence).
+    worker_type_shard_counter: Dict[str, int] = defaultdict(int)
+
+    # The final result of all this processing
+    dict_to_return: Dict[str, Set[str]] = {}
+
+    # Handle any multipliers requested for given workers.
+    multiple_processed_worker_types = apply_requested_multiplier_for_worker(
+        requested_worker_types
+    )
+
+    # Process each worker_type_string
+    # Examples of expected formats:
+    #  - requested_name=type1+type2+type3
+    #  - synchrotron
+    #  - event_creator+event_persister
+    for worker_type_string in multiple_processed_worker_types:
+        # First, if a name is requested, use that — otherwise generate one.
+        worker_base_name: str = ""
+        if "=" in worker_type_string:
+            # Split on "=", remove extra whitespace from ends then make list
+            worker_type_split = split_and_strip_string(worker_type_string, "=")
+            if len(worker_type_split) > 2:
+                error(
+                    "There should only be one '=' in the worker type string. "
+                    f"Please fix: {worker_type_string}"
+                )
+
+            # Assign the name
+            worker_base_name = worker_type_split[0]
+
+            if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
+                # Apply a fairly narrow regex to the worker names. Some characters
+                # aren't safe for use in file paths or nginx configurations.
+                # Don't allow to end with a number because we'll add a number
+                # ourselves in a moment.
+                error(
+                    "Invalid worker name; please choose a name consisting of "
+                    "alphanumeric letters, _ + -, but not ending with a digit: "
+                    f"{worker_base_name!r}"
+                )
+
+            # Continue processing the remainder of the worker_type string
+            # with the name override removed.
+            worker_type_string = worker_type_split[1]
+
+        # Split the worker_type_string on "+", remove whitespace from ends then make
+        # the list a set so it's deduplicated.
+        worker_types_set: Set[str] = set(
+            split_and_strip_string(worker_type_string, "+")
+        )
+
+        if not worker_base_name:
+            # No base name specified: generate one deterministically from set of
+            # types
+            worker_base_name = "+".join(sorted(worker_types_set))
+
+        # At this point, we have:
+        #   worker_base_name which is the name for the worker, without counter.
+        #   worker_types_set which is the set of worker types for this worker.
+
+        # Validate worker_type and make sure we don't allow sharding for a worker type
+        # that doesn't support it. Will error and stop if it is a problem,
+        # e.g. 'background_worker'.
+        for worker_type in worker_types_set:
+            # Verify this is a real defined worker type. If it's not, stop everything so
+            # it can be fixed.
+            if worker_type not in WORKERS_CONFIG:
+                error(
+                    f"{worker_type} is an unknown worker type! Was found in "
+                    f"'{worker_type_string}'. Please fix!"
+                )
+
+            if worker_type in worker_type_shard_counter:
+                if not is_sharding_allowed_for_worker_type(worker_type):
+                    error(
+                        f"There can be only a single worker with {worker_type} "
+                        "type. Please recount and remove."
+                    )
+            # Not in shard counter, must not have seen it yet, add it.
+            worker_type_shard_counter[worker_type] += 1
+
+        # Generate the number for the worker using incrementing counter
+        worker_base_name_counter[worker_base_name] += 1
+        worker_number = worker_base_name_counter[worker_base_name]
+        worker_name = f"{worker_base_name}{worker_number}"
+
+        if worker_number > 1:
+            # If this isn't the first worker, check that we don't have a confusing
+            # mixture of worker types with the same base name.
+            first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
+            if first_worker_with_base_name != worker_types_set:
+                error(
+                    f"Can not use worker_name: '{worker_name}' for worker_type(s): "
+                    f"{worker_types_set!r}. It is already in use by "
+                    f"worker_type(s): {first_worker_with_base_name!r}"
+                )
+
+        dict_to_return[worker_name] = worker_types_set
+
+    return dict_to_return
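
A hypothetical round-trip of the parser, assuming the functions above; names follow the base-name-plus-counter rule:

```python
parse_worker_types(["event_persister:2", "foo=client_reader+pusher"])
# -> {
#     "event_persister1": {"event_persister"},
#     "event_persister2": {"event_persister"},
#     "foo1": {"client_reader", "pusher"},
# }
```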
+
+
def generate_worker_files(
-    environ: Mapping[str, str], config_path: str, data_dir: str
+    environ: Mapping[str, str],
+    config_path: str,
+    data_dir: str,
+    requested_worker_types: Dict[str, Set[str]],
) -> None:
-    """Read the desired list of workers from environment variables and generate
-    shared homeserver, nginx and supervisord configs.
+    """Read the desired workers(if any) that is passed in and generate shared
+    homeserver, nginx and supervisord configs.

    Args:
        environ: os.environ instance.
        config_path: The location of the generated Synapse main worker config file.
        data_dir: The location of the synapse data directory. Where log and
            user-facing config files live.
+        requested_worker_types: A Dict containing requested workers in the format of
+            {'worker_name1': {'worker_type', ...}}
    """
    # Note that yaml cares about indentation, so care should be taken to insert lines
    # into files at the correct indentation below.

    # shared_config is the contents of a Synapse config file that will be shared amongst
    # the main Synapse process as well as all workers.
    # It is intended mainly for disabling functionality when certain workers are spun up,
    # and adding a replication listener.

-    # First read the original config file and extract the listeners block. Then we'll add
-    # another listener for replication. Later we'll write out the result to the shared
-    # config file.
+    # First read the original config file and extract the listeners block. Then we'll
+    # add another listener for replication. Later we'll write out the result to the
+    # shared config file.
    listeners = [
        {
-            "port": 9093,
-            "bind_address": "127.0.0.1",
+            "port": MAIN_PROCESS_REPLICATION_PORT,
+            "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
            "type": "http",
            "resources": [{"names": ["replication"]}],
        }
@@ -427,9 +735,9 @@ def generate_worker_files(
    listeners += original_listeners

    # The shared homeserver config. The contents of which will be inserted into the
-    # base shared worker jinja2 template.
-    #
-    # This config file will be passed to all workers, included Synapse's main process.
+    # base shared worker jinja2 template. This config file will be passed to all
+    # workers, included Synapse's main process. It is intended mainly for disabling
+    # functionality when certain workers are spun up, and adding a replication listener.
    shared_config: Dict[str, Any] = {"listeners": listeners}

    # List of dicts that describe workers.
@@ -437,31 +745,20 @@ def generate_worker_files(
    # program blocks.
    worker_descriptors: List[Dict[str, Any]] = []

-    # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
-    # ports of each worker. For example:
+    # Upstreams for load-balancing purposes. This dict takes the form of the worker
+    # type to the ports of each worker. For example:
    # {
    #   worker_type: {1234, 1235, ...}}
    # }
    # and will be used to construct 'upstream' nginx directives.
    nginx_upstreams: Dict[str, Set[int]] = {}

-    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
-    # placed after the proxy_pass directive. The main benefit to representing this data as a
-    # dict over a str is that we can easily deduplicate endpoints across multiple instances
-    # of the same worker.
-    #
-    # An nginx site config that will be amended to depending on the workers that are
-    # spun up. To be placed in /etc/nginx/conf.d.
-    nginx_locations = {}
-
-    # Read the desired worker configuration from the environment
-    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
-    if not worker_types_env:
-        # No workers, just the main process
-        worker_types = []
-    else:
-        # Split type names by comma, ignoring whitespace.
-        worker_types = [x.strip() for x in worker_types_env.split(",")]
+    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
+    # will be placed after the proxy_pass directive. The main benefit to representing
+    # this data as a dict over a str is that we can easily deduplicate endpoints
+    # across multiple instances of the same worker. The final rendering will be combined
+    # with nginx_upstreams and placed in /etc/nginx/conf.d.
+    nginx_locations: Dict[str, str] = {}

    # Create the worker configuration directory if it doesn't already exist
    os.makedirs("/conf/workers", exist_ok=True)
@@ -469,66 +766,57 @@ def generate_worker_files(
    # Start worker ports from this arbitrary port
    worker_port = 18009

-    # A counter of worker_type -> int. Used for determining the name for a given
-    # worker type when generating its config file, as each worker's name is just
-    # worker_type + instance #
-    worker_type_counter: Dict[str, int] = {}
-
    # A list of internal endpoints to healthcheck, starting with the main process
    # which exists even if no workers do.
    healthcheck_urls = ["http://localhost:8080/health"]

-    # For each worker type specified by the user, create config values
-    for worker_type in worker_types:
-        worker_config = WORKERS_CONFIG.get(worker_type)
-        if worker_config:
-            worker_config = worker_config.copy()
-        else:
-            error(worker_type + " is an unknown worker type! Please fix!")
+    # Get the set of all worker types that we have configured
+    all_worker_types_in_use = set(chain(*requested_worker_types.values()))
+    # Map locations to upstreams (corresponding to worker types) in Nginx
+    # but only if we use the appropriate worker type
+    for worker_type in all_worker_types_in_use:
+        for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
+            nginx_locations[endpoint_pattern] = f"http://{worker_type}"

-        new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
-        worker_type_counter[worker_type] = new_worker_count
+    # For each worker type specified by the user, create config values and write it's
+    # yaml config file
+    for worker_name, worker_types_set in requested_worker_types.items():
+        # The collected and processed data will live here.
+        worker_config: Dict[str, Any] = {}
+
+        # Merge all worker config templates for this worker into a single config
+        for worker_type in worker_types_set:
+            copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
+
+            # Merge worker type template configuration data. It's a combination of lists
+            # and dicts, so use this helper.
+            worker_config = merge_worker_template_configs(
+                worker_config, copy_of_template_config
+            )
+
+        # Replace placeholder names in the config template with the actual worker name.
+        worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)

-        # Name workers by their type concatenated with an incrementing number
-        # e.g. federation_reader1
-        worker_name = worker_type + str(new_worker_count)
        worker_config.update(
            {"name": worker_name, "port": str(worker_port), "config_path": config_path}
        )

-        # Update the shared config with any worker-type specific options
-        shared_config.update(worker_config["shared_extra_conf"])
+        # Update the shared config with any worker_type specific options. The first of a
+        # given worker_type needs to stay assigned and not be replaced.
+        worker_config["shared_extra_conf"].update(shared_config)
+        shared_config = worker_config["shared_extra_conf"]

        healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))

-        # Check if more than one instance of this worker type has been specified
-        worker_type_total_count = worker_types.count(worker_type)
-
        # Update the shared config with sharding-related options if necessary
        add_worker_roles_to_shared_config(
-            shared_config, worker_type, worker_name, worker_port
+            shared_config, worker_types_set, worker_name, worker_port
        )

        # Enable the worker in supervisord
        worker_descriptors.append(worker_config)

-        # Add nginx location blocks for this worker's endpoints (if any are defined)
-        for pattern in worker_config["endpoint_patterns"]:
-            # Determine whether we need to load-balance this worker
-            if worker_type_total_count > 1:
-                # Create or add to a load-balanced upstream for this worker
-                nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
-
-                # Upstreams are named after the worker_type
-                upstream = "http://" + worker_type
-            else:
-                upstream = "http://localhost:%d" % (worker_port,)
-
-            # Note that this endpoint should proxy to this upstream
-            nginx_locations[pattern] = upstream
-
        # Write out the worker's logging config file
        log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)

        # Then a worker config file
@@ -539,6 +827,10 @@ def generate_worker_files(
|
||||
worker_log_config_filepath=log_config_filepath,
|
||||
)
|
||||
|
||||
# Save this worker's port number to the correct nginx upstreams
|
||||
for worker_type in worker_types_set:
|
||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
||||
|
||||
worker_port += 1
|
||||
|
||||
# Build the nginx location config blocks
|
||||
@@ -551,15 +843,14 @@ def generate_worker_files(
|
||||
|
||||
# Determine the load-balancing upstreams to configure
|
||||
nginx_upstream_config = ""
|
||||
|
||||
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
|
||||
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
|
||||
body = ""
|
||||
for port in upstream_worker_ports:
|
||||
body += " server localhost:%d;\n" % (port,)
|
||||
body += f" server localhost:{port};\n"
|
||||
|
||||
# Add to the list of configured upstreams
|
||||
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
|
||||
upstream_worker_type=upstream_worker_type,
|
||||
upstream_worker_base_name=upstream_worker_base_name,
|
||||
body=body,
|
||||
)
|
||||
|
||||
@@ -580,7 +871,15 @@ def generate_worker_files(
|
||||
if reg_path.suffix.lower() in (".yaml", ".yml")
|
||||
]
|
||||
|
||||
workers_in_use = len(worker_types) > 0
|
||||
workers_in_use = len(requested_worker_types) > 0
|
||||
|
||||
# If there are workers, add the main process to the instance_map too.
|
||||
if workers_in_use:
|
||||
instance_map = shared_config.setdefault("instance_map", {})
|
||||
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
|
||||
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
|
||||
"port": MAIN_PROCESS_REPLICATION_PORT,
|
||||
}
|
||||
|
||||
# Shared homeserver config
|
||||
convert(
|
||||
@@ -678,13 +977,26 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
|
||||
generate_base_homeserver_config()
|
||||
else:
|
||||
log("Base homeserver config exists—not regenerating")
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of file).
|
||||
# Don't re-configure workers in this instance.
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of
|
||||
# file). Don't re-configure workers in this instance.
|
||||
mark_filepath = "/conf/workers_have_been_configured"
|
||||
if not os.path.exists(mark_filepath):
|
||||
# Collect and validate worker_type requests
|
||||
# Read the desired worker configuration from the environment
|
||||
worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
|
||||
# Only process worker_types if they exist
|
||||
if not worker_types_env:
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
requested_worker_types: Dict[str, Any] = {}
|
||||
else:
|
||||
# Split type names by comma, ignoring whitespace.
|
||||
worker_types = split_and_strip_string(worker_types_env, ",")
|
||||
requested_worker_types = parse_worker_types(worker_types)
|
||||
|
||||
# Always regenerate all other config files
|
||||
log("Generating worker config files")
|
||||
generate_worker_files(environ, config_path, data_dir)
|
||||
generate_worker_files(environ, config_path, data_dir, requested_worker_types)
|
||||
|
||||
# Mark workers as being configured
|
||||
with open(mark_filepath, "w") as f:
|
||||
|
||||
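For context, the worker types consumed by `main` above arrive as a comma-separated list in the `SYNAPSE_WORKER_TYPES` environment variable. A minimal sketch of requesting workers this way (the image name and the specific worker selection are illustrative, not prescriptive):

```sh
# Hypothetical invocation: request three workers via SYNAPSE_WORKER_TYPES.
# The script splits the value on commas (ignoring whitespace) and passes the
# parsed result to generate_worker_files().
docker run -d \
  -e SYNAPSE_WORKER_TYPES="federation_inbound, federation_sender, synchrotron" \
  matrixdotorg/synapse-workers
```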
@@ -57,6 +57,7 @@
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Event Reports](admin_api/event_reports.md)
- [Experimental Features](admin_api/experimental_features.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
- [Register Users](admin_api/register_api.md)
55
docs/admin_api/experimental_features.md
Normal file
@@ -0,0 +1,55 @@
# Experimental Features API

This API allows a server administrator to enable or disable some experimental features on a per-user
basis. The currently supported features are:
- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
presence state enabled
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
UIA when first uploading cross-signing keys.


To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).

## Enabling/Disabling Features

This API allows a server administrator to enable experimental features for a given user. The request must
provide a body containing the user id and listing the features to enable/disable in the following format:
```json
{
    "features": {
        "msc3026": true,
        "msc3881": true
    }
}
```
where `true` is used to enable the feature, and `false` is used to disable the feature.


The API is:

```
PUT /_synapse/admin/v1/experimental_features/<user_id>
```
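For example, to enable `msc3026` and `msc3881` for a user, a request could look like the following sketch (the server address and access token are placeholders to adjust for your deployment):

```sh
curl -X PUT \
  --header "Authorization: Bearer <admin_access_token>" \
  --data '{"features": {"msc3026": true, "msc3881": true}}' \
  "http://localhost:8008/_synapse/admin/v1/experimental_features/@user:example.com"
```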

## Listing Enabled Features

To list which features are enabled/disabled for a given user send a request to the following API:

```
GET /_synapse/admin/v1/experimental_features/<user_id>
```

It will return a list of possible features and indicate whether they are enabled or disabled for the
user like so:
```json
{
    "features": {
        "msc3026": true,
        "msc3881": false,
        "msc3967": false
    }
}
```
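As with enabling features, a plausible curl invocation for the listing endpoint (placeholders again):

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/experimental_features/@user:example.com"
```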
@@ -81,3 +81,52 @@ The following fields are returned in the JSON response body:
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
* `next_token` - integer - Opaque value used for pagination. See above.
* `total` - integer - Total number of users after filtering.


# Get largest rooms by size in database

Returns the 10 largest rooms and an estimate of how much space in the database
they are taking.

This does not include the size of any media associated with the room.

Returns an error on SQLite.

*Note:* This uses the planner statistics from PostgreSQL to do the estimates,
which means that the returned information can vary widely from reality. However,
it should be enough to get a rough idea of where database disk space is going.


The API is:

```
GET /_synapse/admin/v1/statistics/database/rooms
```
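For instance, a sketch of calling it with curl (host and token are placeholders):

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/statistics/database/rooms"
```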

A response body like the following is returned:

```json
{
    "rooms": [
        {
            "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
            "estimated_size": 47325417353
        }
    ]
}
```


**Response**

The following fields are returned in the JSON response body:

* `rooms` - An array of objects, sorted by largest room first. Objects contain
  the following fields:
  - `room_id` - string - The room ID.
  - `estimated_size` - integer - Estimated disk space used in bytes by the room
    in the database.


*Added in Synapse 1.83.0*

@@ -62,7 +62,7 @@ URL parameters:

- `user_id`: fully-qualified user id: for example, `@user:server.com`.

## Create or modify Account
## Create or modify account

This API allows an administrator to create or modify a user account with a
specific `user_id`.
@@ -78,28 +78,29 @@ with a body of:
```json
{
    "password": "user_password",
    "displayname": "User",
    "logout_devices": false,
    "displayname": "Alice Marigold",
    "avatar_url": "mxc://example.com/abcde12345",
    "threepids": [
        {
            "medium": "email",
            "address": "<user_mail_1>"
            "address": "alice@example.com"
        },
        {
            "medium": "email",
            "address": "<user_mail_2>"
            "address": "alice@domain.org"
        }
    ],
    "external_ids": [
        {
            "auth_provider": "<provider1>",
            "external_id": "<user_id_provider_1>"
            "auth_provider": "example",
            "external_id": "12345"
        },
        {
            "auth_provider": "<provider2>",
            "external_id": "<user_id_provider_2>"
            "auth_provider": "example2",
            "external_id": "abc54321"
        }
    ],
    "avatar_url": "<avatar_url>",
    "admin": false,
    "deactivated": false,
    "user_type": null
@@ -112,41 +113,51 @@ Returns HTTP status code:

URL parameters:

- `user_id`: fully-qualified user id: for example, `@user:server.com`.
- `user_id` - A fully-qualified user id. For example, `@user:server.com`.

Body parameters:

- `password` - string, optional. If provided, the user's password is updated and all
- `password` - **string**, optional. If provided, the user's password is updated and all
  devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't
  logged out even when `password` is provided.
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
  - `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
  - `address` - string. Value of third-party ID.
  belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). Details in the configuration manual under the
  sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
  - `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
    in the homeserver configuration. Note that no error is raised if the provided
    value is not in the homeserver configuration.
  - `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name
  will be removed.
- `avatar_url` - **string**, optional. Must be a
  [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
- `admin` - bool, optional, defaults to `false`.
- `deactivated` - bool, optional. If unspecified, deactivation state will be left
  unchanged on existing accounts and set to `false` for new accounts.
  A user cannot be erased by deactivating with this API. For details on
  deactivating users see [Deactivate Account](#deactivate-account).
- `user_type` - string or null, optional. If provided, the user type will be
  adjusted. If `null` given, the user type will be cleared. Other
  allowed options are: `bot` and `support`.
  If set to an empty string (`""`), the user's avatar is removed.
- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are
  entirely replaced with the given list. Each item in the array is an object with the following
  fields:
  - `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number).
  - `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or
    `447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number
    with country code "1") for `msisdn`.
  Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove
  that threepid from any identity servers it is aware has a binding for it.
- `external_ids` - **array**, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). More details are in the configuration manual under the
  sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
  - `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
    The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
    provided value is not in the homeserver configuration.
  - `external_id` - **string**, required. An identifier for the user in the external identity provider.
    When the user logs in to the identity provider, this must be the unique ID that they map to.
- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
  granting them access to the Admin API, among other things.
- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.

  If the user already exists then optional parameters default to the current value.
  Note: the `password` field must also be set if both of the following are true:
  - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
  - Users are allowed to set their password on this homeserver (both `password_config.enabled` and
    `password_config.localdb_enabled` config options are set to `true`).
  Users' passwords are wiped upon account deactivation, hence the need to set a new one here.

  In order to re-activate an account `deactivated` must be set to `false`. If
  users do not login via single-sign-on, a new `password` must be provided.
  Note: a user cannot be erased with this API. For more details on
  deactivating and erasing users see [Deactivate Account](#deactivate-account).
- `user_type` - **string** or null, optional. If not provided, the user type will
  not be changed. If `null` is given, the user type will be cleared.
  Other allowed options are: `bot` and `support`.
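Putting the pieces together, a sketch of a create-or-modify call against the documented `PUT /_synapse/admin/v2/users/<user_id>` endpoint (token, server, and body values are illustrative):

```sh
curl -X PUT \
  --header "Authorization: Bearer <admin_access_token>" \
  --data '{"password": "user_password", "admin": false}' \
  "http://localhost:8008/_synapse/admin/v2/users/@user:server.com"
```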

## List Accounts


@@ -346,6 +346,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -155,43 +155,11 @@ def run_upgrade(
Boolean columns require special treatment, since SQLite treats booleans the
same as integers.

There are three separate aspects to this:

* Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
`synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
the integer value from SQLite to a boolean before writing the value to the
postgres database (see the sketch after this list).

* Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
  SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
  supported. This makes it necessary to avoid using `TRUE` and `FALSE`
  constants in SQL commands.

  For example, to insert a `TRUE` value into the database, write:

  ```python
  txn.execute("INSERT INTO tbl(col) VALUES (?)", (True, ))
  ```

* Default values for new boolean columns present a particular
  difficulty. Generally it is best to create separate schema files for
  Postgres and SQLite. For example:

  ```sql
  -- in 00delta.sql.postgres:
  ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT FALSE;
  ```

  ```sql
  -- in 00delta.sql.sqlite:
  ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT 0;
  ```

  Note that there is a particularly insidious failure mode here: the Postgres
  flavour will be accepted by SQLite 3.22, but will give a column whose
  default value is the **string** `"FALSE"` - which, when cast back to a boolean
  in Python, evaluates to `True`.
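A sketch of what the first point looks like in practice; the table and column names in this `BOOLEAN_COLUMNS` excerpt are illustrative, not a copy of the real list:

```python
# In synapse/_scripts/synapse_port_db.py (illustrative excerpt).
# Maps table name -> the columns that must be cast from SQLite's 0/1
# integers to proper booleans when porting to Postgres.
BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier"],
    "rooms": ["is_public"],
    # When you add a new boolean column in a schema delta, list it here too:
    "my_new_table": ["my_new_boolean_column"],
}
```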


## `event_id` global uniqueness

@@ -103,6 +103,9 @@ Called during a logout request for a user. It is passed the qualified user ID, t
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.

Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
to still be present.

If multiple modules implement this callback, Synapse runs them all in order.
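A minimal module sketch implementing this callback, assuming the usual module-API registration pattern (the class name and the logging it does are illustrative):

```python
from typing import Optional

import synapse.module_api


class ExampleAuditModule:
    def __init__(self, config: dict, api: synapse.module_api.ModuleApi):
        self._api = api
        # on_logged_out is registered alongside the other password auth
        # provider callbacks; we only implement this one.
        api.register_password_auth_provider_callbacks(
            on_logged_out=self.on_logged_out,
        )

    async def on_logged_out(
        self, user_id: str, device_id: Optional[str], access_token: str
    ) -> None:
        # Synapse awaits this before deleting the related pushers, so they
        # are still present at this point.
        print(f"{user_id} logged out of device {device_id}")
```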

### `get_username_for_registration`

@@ -569,7 +569,7 @@ You should receive a response similar to the following. Make sure to save it.
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```

As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. Your Synapse configuration should include the following:

```yaml
oidc_providers:
@@ -585,7 +585,9 @@ oidc_providers:
    scopes: ["read"]
    user_mapping_provider:
      config:
        subject_claim: "id"
        subject_template: "{{ user.id }}"
        localpart_template: "{{ user.username }}"
        display_name_template: "{{ user.display_name }}"
```

Note that the fields `client_id` and `client_secret` are taken from the CURL response above.

@@ -26,8 +26,8 @@ for most users.
#### Docker images and Ansible playbooks

There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at
<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
which can be used with the docker-compose file available at
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
on hub.docker.com.

@@ -1,10 +1,6 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083

@@ -25,7 +25,7 @@ position of all streams. The server then periodically sends `RDATA` commands
which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
the format of `<row>` is defined by the individual streams. The
`<instance_name>` is the name of the Synapse process that generated the data
(usually "master").
(usually "master"). We expect an RDATA for every row in the DB.

Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
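For illustration, given that format, a single `RDATA` command on the `events` stream might look like the following (the token and row contents here are hypothetical):

```
RDATA events master 153 ["$event_id", "!room_id:example.com", ...]
```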
@@ -107,7 +107,7 @@ reconnect, following the steps above.
If the server sends messages faster than the client can consume them the
server will first buffer a (fairly large) number of commands and then
disconnect the client. This ensures that we don't queue up an unbounded
number of commands in memory and gives us a potential oppurtunity to
number of commands in memory and gives us a potential opportunity to
squawk loudly. When/if the client recovers it can reconnect to the
server and ask for missed messages.

@@ -122,7 +122,7 @@ since these include tokens which can be used to restart the stream on
connection errors.

The client should keep track of the token in the last RDATA command
received for each stream so that on reconneciton it can start streaming
received for each stream so that on reconnection it can start streaming
from the correct place. Note: not all RDATA have valid tokens due to
batching. See `RdataCommand` for more details.

@@ -188,7 +188,8 @@ client (C):
Two positions are included, the "new" position and the last position sent respectively.
This allows servers to tell instances that the positions have advanced but no
data has been written, without clients needlessly checking to see if they
have missed any updates.
have missed any updates. Instances will only fetch stuff if there is a gap between
their current position and the given last position.
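As a purely illustrative sketch of such a command, assuming it carries the stream name, instance name, and the two tokens in that order (the exact field order is defined by the command class in the source, which this excerpt does not show):

```
POSITION events master 230 235
```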

#### ERROR (S, C)

128
docs/upgrade.md
@@ -88,6 +88,123 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.84.0

## Deprecation of `worker_replication_*` configuration settings

When using workers,

* `worker_replication_host`
* `worker_replication_http_port`
* `worker_replication_http_tls`

should now be removed from individual worker YAML configurations and the main process should instead be added to the `instance_map`
in the shared YAML configuration, using the name `main`.

The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0.


### Example change

#### Before:

Shared YAML
```yaml
instance_map:
  generic_worker1:
    host: localhost
    port: 5678
    tls: false
```

Worker YAML
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_replication_host: localhost
worker_replication_http_port: 3456
worker_replication_http_tls: false

worker_listeners:
  - type: http
    port: 1234
    resources:
      - names: [client, federation]
  - type: http
    port: 5678
    resources:
      - names: [replication]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```


#### After:

Shared YAML
```yaml
instance_map:
  main:
    host: localhost
    port: 3456
    tls: false
  generic_worker1:
    host: localhost
    port: 5678
    tls: false
```

Worker YAML
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_listeners:
  - type: http
    port: 1234
    resources:
      - names: [client, federation]
  - type: http
    port: 5678
    resources:
      - names: [replication]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml

```
Notes:
* `tls` is optional but mirrors the functionality of `worker_replication_http_tls`


# Upgrading to v1.81.0

## Application service path & authentication deprecations

Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.

Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.

A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.

# Upgrading to v1.80.0

## Reporting events error code change

Before this update, the
[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid)
endpoint would return a `403` if a user attempted to report an event that they did not have access to.
This endpoint will now return a `404` in this case instead.

Clients that implement event reporting should check that their error handling code will handle this
change.

# Upgrading to v1.79.0

## The `on_threepid_bind` module callback method has been deprecated
@@ -171,6 +288,17 @@ Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.


## User directory rebuild

Synapse 1.74 queues a background update
[to rebuild the user directory](https://github.com/matrix-org/synapse/pull/14643),
in order to fix missing or erroneous entries.

When this update begins, the user directory will be cleared out and rebuilt from
scratch. User directory lookups will be incomplete until the rebuild completes.
Admins can monitor the rebuild's progress by using the
[Background update Admin API](usage/administration/admin_api/background_updates.md#status).
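That status endpoint can be polled directly; a sketch with curl (host and token are placeholders):

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/background_updates/status"
```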

# Upgrading to v1.73.0

## Legacy Prometheus metric names have now been removed

@@ -577,6 +577,10 @@ delete any device that hasn't been accessed for more than the specified amount o

Defaults to no duration, which means devices are never pruned.

**Note:** This task will always run on the main process, regardless of the value of
`run_background_tasks_on`. This is due to workers currently not having the ability to
delete devices.

Example configuration:
```yaml
delete_stale_devices_after: 1y
@@ -1521,7 +1525,7 @@ This option specifies several limits for login:
  address. Defaults to `per_second: 0.003`, `burst_count: 5`.

* `account` ratelimits login requests based on the account the
  client is attempting to log into. Defaults to `per_second: 0.03`,
  client is attempting to log into. Defaults to `per_second: 0.003`,
  `burst_count: 5`.

* `failed_attempts` ratelimits login requests based on the account the
@@ -1764,6 +1768,30 @@ Example configuration:
max_image_pixels: 35M
```
---
### `prevent_media_downloads_from`

A list of domains to never download media from. Media from these
domains that is already downloaded will not be deleted, but will be
inaccessible to users. This option does not affect admin APIs trying
to download/operate on media.

This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
from the listed servers.

This will have no effect on media originating from the local server.
This only affects media downloaded from other Matrix servers; to
block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).

Defaults to an empty list (nothing blocked).

Example configuration:
```yaml
prevent_media_downloads_from:
  - evil.example.org
  - evil2.example.org
```
---
### `dynamic_thumbnails`

Whether to generate new thumbnails on the fly to precisely match
@@ -3100,6 +3128,11 @@ Options for each entry include:
  match a pre-existing account instead of failing. This could be used if
  switching from password logins to OIDC. Defaults to false.

* `enable_registration`: set to 'false' to disable automatic registration of new
  users. This allows the OIDC SSO flow to be limited to sign in only, rather than
  automatically registering users that have a valid SSO login but do not have
  a pre-registered account. Defaults to true.

* `user_mapping_provider`: Configuration for how attributes returned from an OIDC
  provider are mapped onto a matrix user. This setting has the following
  sub-properties:
@@ -3216,6 +3249,7 @@ oidc_providers:
    userinfo_endpoint: "https://accounts.example.com/userinfo"
    jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
    skip_verification: true
    enable_registration: true
    user_mapping_provider:
      config:
        subject_claim: "id"
@@ -3432,6 +3466,9 @@ This option has a number of sub-options. They are as follows:
  user has unread messages in. Defaults to true, meaning push clients will see the number of
  rooms with unread messages in them. Set to false to instead send the number
  of unread messages.
* `jitter_delay`: Delays push notifications by a random amount up to the given
  duration. Useful for mitigating timing attacks. Optional, defaults to no
  delay. _Added in Synapse 1.84.0._

Example configuration:
```yaml
@@ -3439,6 +3476,7 @@ push:
  enabled: true
  include_content: false
  group_unread_count_by_room: false
  jitter_delay: "10s"
```
---
## Rooms
@@ -3685,6 +3723,16 @@ default_power_level_content_override:
  trusted_private_chat: null
  public_chat: null
```
---
### `forget_rooms_on_leave`

Set to true to automatically forget rooms for users when they leave them, either
normally or via a kick or ban. Defaults to false.

Example configuration:
```yaml
forget_rooms_on_leave: false
```

---
## Opentracing
@@ -3836,15 +3884,20 @@ federation_sender_instances:
### `instance_map`

When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
HTTP replication listener of the worker, if configured, and to the main process.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)
The main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is declared
inside the `listener` block for a `replication` listener.


Example configuration:
```yaml
instance_map:
  main:
    host: localhost
    port: 8030
  worker1:
    host: localhost
    port: 8034
@@ -3928,9 +3981,16 @@ This setting has the following sub-options:
  localhost and 6379
* `password`: Optional password if configured on the Redis instance.
* `dbid`: Optional redis dbid if you need to connect to a specific redis logical db.
* `use_tls`: Whether to use a TLS connection. Defaults to false.
* `certificate_file`: Optional path to the certificate file
* `private_key_file`: Optional path to the private key file
* `ca_file`: Optional path to the CA certificate file. Use this one or:
* `ca_path`: Optional path to the folder containing the CA certificate file

_Added in Synapse 1.78.0._

_Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_

Example configuration:
```yaml
redis:
@@ -3939,6 +3999,10 @@ redis:
  port: 6379
  password: <secret_password>
  dbid: <dbid>
  #use_tls: True
  #certificate_file: <path_to_the_certificate_file>
  #private_key_file: <path_to_the_private_key_file>
  #ca_file: <path_to_the_ca_certificate_file>
```
---
## Individual worker configuration
@@ -3976,6 +4040,7 @@ worker_name: generic_worker1
```
---
### `worker_replication_host`
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
@@ -3987,6 +4052,7 @@ worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
@@ -3998,6 +4064,7 @@ worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
@@ -4023,9 +4090,9 @@ A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.

Workers declared in [`stream_writers`](#stream_writers) will need to include a
`replication` listener here, in order to accept internal HTTP requests from
other workers.
Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map)
will need to include a `replication` listener here, in order to accept internal HTTP
requests from other workers.

Example configuration:
```yaml

111
docs/workers.md
@@ -87,12 +87,18 @@ shared configuration file.

### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an
Normally, only a few changes are needed to make an existing configuration
file suitable for use with workers:
* First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
for the main process; and secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis).
Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
  for the main process
* Secondly, you need to enable
  [redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
  with the `main` process defined, as well as the relevant connection information from
  its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
  is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
  can be used to authenticate HTTP traffic between workers. For example:

```yaml
@@ -111,6 +117,11 @@ worker_replication_secret: ""

redis:
  enabled: true

instance_map:
  main:
    host: 'localhost'
    port: 9093
```

See the [configuration manual](usage/configuration/config_documentation.md)
@@ -130,13 +141,13 @@ In the config file for each worker, you must specify:
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
  The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`

For example:

@@ -234,6 +245,8 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases
    ^/_matrix/client/(api/v1|r0|v3|unstable)/search$
    ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
    ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
    ^/_matrix/client/(r0|v3|unstable)/capabilities$

    # Encryption requests
    ^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -245,7 +258,9 @@ information.
    # Registration/login requests
    ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
    ^/_matrix/client/(r0|v3|unstable)/register$
    ^/_matrix/client/(r0|v3|unstable)/register/available$
    ^/_matrix/client/v1/register/m.login.registration_token/validity$
    ^/_matrix/client/(r0|v3|unstable)/password_policy$

    # Event sending requests
    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
@@ -321,8 +336,7 @@ load balancing can be done in different ways.

For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. This can
be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
`hash $http_authorization consistent;` which contains the users access token.
be done in the reverse proxy by extracting the username part from the user's access token.

Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
@@ -331,6 +345,69 @@ when a user logs in on a new device and can be *very* resource intensive, so
isolating these requests will stop them from interfering with other users ongoing
syncs.

Example `nginx` configuration snippet that handles the cases above. This is just an
example and probably requires some changes according to your particular setup:

```nginx
# Choose sync worker based on the existence of "since" query parameter
map $arg_since $sync {
    default synapse_sync;
    '' synapse_initial_sync;
}

# Extract username from access token passed as URL parameter
map $arg_access_token $accesstoken_from_urlparam {
    # Defaults to just passing back the whole accesstoken
    default $arg_access_token;
    # Try to extract username part from accesstoken URL parameter
    "~syt_(?<username>.*?)_.*" $username;
}

# Extract username from access token passed as authorization header
map $http_authorization $mxid_localpart {
    # Defaults to just passing back the whole accesstoken
    default $http_authorization;
    # Try to extract username part from accesstoken header
    "~Bearer syt_(?<username>.*?)_.*" $username;
    # if no authorization-header exist, try mapper for URL parameter "access_token"
    "" $accesstoken_from_urlparam;
}

upstream synapse_initial_sync {
    # Use the username mapper result for hash key
    hash $mxid_localpart consistent;
    server 127.0.0.1:8016;
    server 127.0.0.1:8036;
}

upstream synapse_sync {
    # Use the username mapper result for hash key
    hash $mxid_localpart consistent;
    server 127.0.0.1:8013;
    server 127.0.0.1:8037;
    server 127.0.0.1:8038;
    server 127.0.0.1:8039;
}

# Sync initial/normal
location ~ ^/_matrix/client/(r0|v3)/sync$ {
    proxy_pass http://$sync;
}

# Normal sync
location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ {
    proxy_pass http://synapse_sync;
}

# Initial_sync
location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ {
    proxy_pass http://synapse_initial_sync;
}
location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ {
    proxy_pass http://synapse_initial_sync;
}
```

Federation and client requests can be balanced via simple round robin.

The inbound federation transaction request `^/_matrix/federation/v1/send/`
@@ -351,11 +428,14 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.

To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
To enable this, the worker must have:
* An [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
* Have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
config. The same worker can handle multiple streams, but unless otherwise documented,
  config.
* Have the main process declared on the [`instance_map`](usage/configuration/config_documentation.md#instance_map) as well.

Note: The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
@@ -363,6 +443,9 @@ configuration would include:

```yaml
instance_map:
  main:
    host: localhost
    port: 8030
  event_persister1:
    host: localhost
    port: 8034

274
flake.lock
generated
Normal file
@@ -0,0 +1,274 @@
{
  "nodes": {
    "devenv": {
      "inputs": {
        "flake-compat": "flake-compat",
        "nix": "nix",
        "nixpkgs": "nixpkgs",
        "pre-commit-hooks": "pre-commit-hooks"
      },
      "locked": {
        "lastModified": 1683102061,
        "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
        "owner": "cachix",
        "repo": "devenv",
        "rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "ref": "main",
        "repo": "devenv",
        "type": "github"
      }
    },
    "fenix": {
      "inputs": {
        "nixpkgs": [
          "nixpkgs"
        ],
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
        "lastModified": 1682490133,
        "narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
        "owner": "nix-community",
        "repo": "fenix",
        "rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "repo": "fenix",
        "type": "github"
      }
    },
    "flake-compat": {
      "flake": false,
      "locked": {
        "lastModified": 1673956053,
        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
        "owner": "edolstra",
        "repo": "flake-compat",
        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
        "type": "github"
      },
      "original": {
        "owner": "edolstra",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "flake-utils": {
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "gitignore": {
      "inputs": {
        "nixpkgs": [
          "devenv",
          "pre-commit-hooks",
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1660459072,
        "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
        "type": "github"
      },
      "original": {
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "type": "github"
      }
    },
    "lowdown-src": {
      "flake": false,
      "locked": {
        "lastModified": 1633514407,
        "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
        "owner": "kristapsdz",
        "repo": "lowdown",
        "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
        "type": "github"
      },
      "original": {
        "owner": "kristapsdz",
        "repo": "lowdown",
        "type": "github"
      }
    },
    "nix": {
      "inputs": {
        "lowdown-src": "lowdown-src",
        "nixpkgs": [
          "devenv",
          "nixpkgs"
        ],
        "nixpkgs-regression": "nixpkgs-regression"
      },
      "locked": {
        "lastModified": 1676545802,
        "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
        "owner": "domenkozar",
        "repo": "nix",
        "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
        "type": "github"
      },
      "original": {
        "owner": "domenkozar",
        "ref": "relaxed-flakes",
        "repo": "nix",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1678875422,
        "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs-regression": {
      "locked": {
        "lastModified": 1643052045,
        "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
        "type": "github"
      }
    },
    "nixpkgs-stable": {
      "locked": {
        "lastModified": 1673800717,
        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-22.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1682519441,
        "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "master",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "pre-commit-hooks": {
      "inputs": {
        "flake-compat": [
          "devenv",
          "flake-compat"
        ],
        "flake-utils": "flake-utils",
        "gitignore": "gitignore",
        "nixpkgs": [
          "devenv",
          "nixpkgs"
        ],
        "nixpkgs-stable": "nixpkgs-stable"
      },
      "locked": {
        "lastModified": 1678376203,
        "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
        "owner": "cachix",
        "repo": "pre-commit-hooks.nix",
        "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "repo": "pre-commit-hooks.nix",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "devenv": "devenv",
        "fenix": "fenix",
        "nixpkgs": "nixpkgs_2",
        "systems": "systems"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1682426789,
        "narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
204
flake.nix
Normal file
@@ -0,0 +1,204 @@
# A nix flake that sets up a complete Synapse development environment. Dependencies
# for the SyTest (https://github.com/matrix-org/sytest) and Complement
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed nix (https://nixos.org) on your system to use this.
# nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
#
# Usage:
#
# With nix installed, navigate to the directory containing this flake and run
# `nix develop --impure`. The `--impure` is necessary in order to store state
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# available to you!
#
# You can start up pre-configured, local PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
#
# A PostgreSQL database called 'synapse' will be set up for you, along with
# a PostgreSQL user named 'synapse_user'.
# The 'host' can be found by running `echo $PGHOST` with the development
# shell activated. Use these values to configure your Synapse to connect
# to the local PostgreSQL database. You do not need to specify a password.
# https://matrix-org.github.io/synapse/latest/postgres
#
# All state (the venv, postgres and redis data and config) are stored in
# .devenv/state. Deleting a file from here and then re-entering the shell
# will recreate these files from scratch.
#
# You can exit the development shell by typing `exit`, or using Ctrl-D.
#
# If you would like this development environment to activate automatically
# upon entering this directory in your terminal, first install `direnv`
# (https://direnv.net/). Then run `echo 'use flake . --impure' >> .envrc` at
# the root of the Synapse repo. Finally, run `direnv allow .` to allow the
# contents of '.envrc' to run every time you enter this directory. Voilà!

{
  inputs = {
    # Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
    # does not contain 'perl536Packages.NetAsyncHTTP', needed by SyTest.
    nixpkgs.url = "github:NixOS/nixpkgs/master";
    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
    systems.url = "github:nix-systems/default";
    # A development environment manager built on Nix. See https://devenv.sh.
    devenv.url = "github:cachix/devenv/main";
    # Rust toolchains and rust-analyzer nightly.
    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };

  outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
    let
      forEachSystem = nixpkgs.lib.genAttrs (import systems);
    in {
      devShells = forEachSystem (system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
        in {
          # Everything is configured via devenv - a nix module for creating declarative
          # developer environments. See https://devenv.sh/reference/options/ for a list
          # of all possible options.
          default = devenv.lib.mkShell {
            inherit inputs pkgs;
            modules = [
              {
                # Make use of the Starship command prompt when this development environment
                # is manually activated (via `nix develop --impure`).
                # See https://starship.rs/ for details on the prompt itself.
                starship.enable = true;

                # Configure packages to install.
                # Search for package names at https://search.nixos.org/packages?channel=unstable
                packages = with pkgs; [
                  # Native dependencies for running Synapse.
                  icu
                  libffi
                  libjpeg
                  libpqxx
                  libwebp
                  libxml2
                  libxslt
                  sqlite

                  # Native dependencies for unit tests (SyTest also requires OpenSSL).
                  openssl
                  xmlsec

                  # Native dependencies for running Complement.
                  olm

                  # For building the Synapse documentation website.
                  mdbook
                ];

                # Install Python and manage a virtualenv with Poetry.
                languages.python.enable = true;
                languages.python.poetry.enable = true;
                # Automatically activate the poetry virtualenv upon entering the shell.
                languages.python.poetry.activate.enable = true;
                # Install all extra Python dependencies; this is needed to run the unit
                # tests and utilise all Synapse features.
                languages.python.poetry.install.arguments = ["--extras all"];
                # Install the 'matrix-synapse' package from the local checkout.
                languages.python.poetry.install.installRootPackage = true;

                # This is a work-around for NixOS systems. NixOS is special in
                # that you can have multiple versions of packages installed at
                # once, including your libc linker!
                #
                # Some binaries built for Linux expect those to be in a certain
                # filepath, but that is not the case on NixOS. In that case, we
                # force compiling those binaries locally instead.
                env.POETRY_INSTALLER_NO_BINARY = "ruff";

                # Install dependencies for the additional programming languages
                # involved with Synapse development.
                #
                # * Rust is used for developing and running Synapse.
                # * Golang is needed to run the Complement test suite.
                # * Perl is needed to run the SyTest test suite.
                languages.go.enable = true;
                languages.rust.enable = true;
                languages.rust.version = "stable";
                languages.perl.enable = true;

                # Postgres is needed to run Synapse with postgres support and
                # to run certain unit tests that require postgres.
                services.postgres.enable = true;

                # On the first invocation of `devenv up`, create a database for
                # Synapse to store data in.
                services.postgres.initdbArgs = ["--locale=C" "--encoding=UTF8"];
                services.postgres.initialDatabases = [
                  { name = "synapse"; }
                ];
                # Create a postgres user called 'synapse_user' which has ownership
                # over the 'synapse' database.
                services.postgres.initialScript = ''
                  CREATE USER synapse_user;
                  ALTER DATABASE synapse OWNER TO synapse_user;
                '';

                # Redis is needed in order to run Synapse in worker mode.
                services.redis.enable = true;

                # Define the perl modules we require to run SyTest.
                #
                # This list was compiled by cross-referencing https://metacpan.org/
                # with the modules defined in './cpanfile' and then finding the
                # corresponding nix packages on https://search.nixos.org/packages.
                #
                # This was done until `./install-deps.pl --dryrun` produced no output.
                env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
                  DBI
                  ClassMethodModifiers
                  CryptEd25519
                  DataDump
                  DBDPg
                  DigestHMAC
                  DigestSHA1
                  EmailAddressXS
                  EmailMIME
                  EmailSimple # required by Email::Mime
                  EmailMessageID # required by Email::Mime
                  EmailMIMEContentType # required by Email::Mime
                  TextUnidecode # required by Email::Mime
                  ModuleRuntime # required by Email::Mime
                  EmailMIMEEncodings # required by Email::Mime
                  FilePath
                  FileSlurper
                  Future
                  GetoptLong
                  HTTPMessage
                  IOAsync
                  IOAsyncSSL
                  IOSocketSSL
                  NetSSLeay
                  JSON
                  ListUtilsBy
                  ScalarListUtils
                  ModulePluggable
                  NetAsyncHTTP
                  MetricsAny # required by Net::Async::HTTP
                  NetAsyncHTTPServer
                  StructDumb
                  URI
                  YAMLLibYAML
                ]}";
              }
            ];
          };
        });
    };
}
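
As a quick sanity check of the environment the flake above sets up, a snippet like the following (illustrative, not part of the flake) should connect to the devenv-managed PostgreSQL once `devenv up` is running, using the `$PGHOST` socket directory, the `synapse_user` user and the `synapse` database described in the comments:

```python
# A minimal sketch, assuming the devenv shell is active, `devenv up` is
# running, and psycopg2 is installed (it is a Synapse dependency).
import os

import psycopg2

conn = psycopg2.connect(
    host=os.environ["PGHOST"],  # the socket directory devenv exports
    user="synapse_user",
    dbname="synapse",
)
print(conn.server_version)
conn.close()
```
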

27  mypy.ini
@@ -21,26 +21,7 @@ files =
  tests/,
  build_rust.py

# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
#
# For now, set the (?x) flag to enable "verbose" regexes
# https://docs.python.org/3/library/re.html#re.X
exclude = (?x)
  ^(
   |synapse/storage/databases/__init__.py
   |synapse/storage/databases/main/cache.py
   |synapse/storage/schema/
   )$

[mypy-synapse.federation.transport.client]
disallow_untyped_defs = False

[mypy-synapse.http.matrixfederationclient]
disallow_untyped_defs = False

[mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
warn_unused_ignores = False
@@ -48,9 +29,6 @@ warn_unused_ignores = False
[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False

[mypy-synapse.storage.database]
disallow_untyped_defs = False

[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False

@@ -74,11 +52,6 @@ ignore_missing_imports = True
[mypy-msgpack]
ignore_missing_imports = True

# Note: WIP stubs available at
# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
[mypy-netaddr]
ignore_missing_imports = True

[mypy-parameterized.*]
ignore_missing_imports = True
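
For reference, a small illustration (not from the repo) of the `(?x)` verbose-regex flag that the `exclude` setting above relies on: whitespace and comments inside the pattern are ignored, so each excluded path can sit on its own line.

```python
import re

# Hypothetical pattern mirroring the style of the mypy `exclude` above.
pattern = re.compile(
    r"""(?x)
    ^(
     |synapse/storage/schema/
    )$
    """
)
assert pattern.match("synapse/storage/schema/")
assert not pattern.match("synapse/other.py")
```
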

2197  poetry.lock  generated
File diff suppressed because it is too large
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.79.0rc1"
version = "1.84.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -153,15 +153,13 @@ python = "^3.7.1"
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this
# because frozendict has been more trouble than it's worth; we would like to move to immutabledict.
frozendict = ">=1,!=2.1.2,<2.3.5"
# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
immutabledict = ">=2.0"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
# We require 1.5.0 to work around an issue when running against the C implementation of
# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36
canonicaljson = "^1.5.0"
# We require 2.0.0 for immutabledict support.
canonicaljson = "^2.0.0"
# we use the type definitions added in signedjson 1.1.
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.
@@ -313,7 +311,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.252"
ruff = "0.0.265"

# Typechecking
mypy = "*"
@@ -321,6 +319,7 @@ mypy-zope = "*"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"
types-netaddr = ">=0.8.0.6"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
@@ -351,13 +350,25 @@ towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"


# Dependencies for building the development documentation
[tool.poetry.group.dev-docs]
optional = true

[tool.poetry.group.dev-docs.dependencies]
sphinx = {version = "^6.1", python = "^3.8"}
sphinx-autodoc2 = {version = "^0.4.2", python = "^3.8"}
myst-parser = {version = "^1.0.0", python = "^3.8"}
furo = ">=2022.12.7,<2024.0.0"


[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# #13849 and #14079 where we see buildtime or runtime errors caused by build
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"]
requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
build-backend = "poetry.core.masonry.api"
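
Illustrating the frozendict-to-immutabledict swap above (a sketch, not project code): immutabledict provides the same read-only mapping behaviour Synapse previously relied on frozendict for.

```python
from immutabledict import immutabledict

d = immutabledict({"event_id": "$abc"})
assert d["event_id"] == "$abc"
try:
    d["event_id"] = "$xyz"  # type: ignore[index]
except TypeError:
    print("immutabledict rejects mutation, as frozendict did")
```
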
@@ -57,7 +57,7 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
    rule_id: Cow::Borrowed("global/override/.m.rule.master"),
    priority_class: 5,
    conditions: Cow::Borrowed(&[]),
    actions: Cow::Borrowed(&[Action::DontNotify]),
    actions: Cow::Borrowed(&[]),
    default: true,
    default_enabled: false,
}];
@@ -88,7 +88,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
            pattern: Cow::Borrowed("m.notice"),
        },
    ))]),
    actions: Cow::Borrowed(&[Action::DontNotify]),
    actions: Cow::Borrowed(&[]),
    default: true,
    default_enabled: true,
},
@@ -122,7 +122,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
            pattern: Cow::Borrowed("m.room.member"),
        },
    ))]),
    actions: Cow::Borrowed(&[Action::DontNotify]),
    actions: Cow::Borrowed(&[]),
    default: true,
    default_enabled: true,
},

@@ -140,7 +140,7 @@ impl PushRuleEvaluator {
    /// name.
    ///
    /// Returns the set of actions, if any, that match (filtering out any
    /// `dont_notify` actions).
    /// `dont_notify` and `coalesce` actions).
    pub fn run(
        &self,
        push_rules: &FilteredPushRules,
@@ -198,8 +198,9 @@ impl PushRuleEvaluator {
        let actions = push_rule
            .actions
            .iter()
            // Filter out "dont_notify" actions, as we don't store them.
            .filter(|a| **a != Action::DontNotify)
            // Filter out "dont_notify" and "coalesce" actions, as we don't store them
            // (since they result in no action by the pushers).
            .filter(|a| **a != Action::DontNotify && **a != Action::Coalesce)
            .cloned()
            .collect();

@@ -164,11 +164,13 @@ impl PushRule {
/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
    DontNotify,
    Notify,
    Coalesce,
    SetTweak(SetTweak),

    // Legacy actions that should be understood, but are equivalent to no-ops.
    DontNotify,
    Coalesce,

    // An unrecognized custom action.
    Unknown(Value),
}
@@ -568,7 +570,10 @@ impl FilteredPushRules {
            .filter(|rule| {
                // Ignore disabled experimental push rules

                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
                if !self.msc1767_enabled
                    && (rule.rule_id.contains("org.matrix.msc1767")
                        || rule.rule_id.contains("org.matrix.msc3933"))
                {
                    return false;
                }
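
The effect of the evaluator change above, restated as a minimal Python sketch (names here are illustrative, not Synapse's API): both legacy actions are treated as no-ops and stripped before actions are stored or handed to pushers.

```python
from typing import Any, List

# Legacy push rule actions that are now treated as no-ops.
LEGACY_NOOP_ACTIONS = ("dont_notify", "coalesce")

def effective_actions(actions: List[Any]) -> List[Any]:
    """Drop legacy no-op actions, keeping e.g. "notify" and set_tweak dicts."""
    return [a for a in actions if a not in LEGACY_NOOP_ACTIONS]

assert effective_actions(
    ["notify", "dont_notify", {"set_tweak": "highlight"}, "coalesce"]
) == ["notify", {"set_tweak": "highlight"}]
```
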
@@ -28,6 +28,7 @@ DISTS = (
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
    "ubuntu:lunar",  # 23.04 (EOL 2024-01)
)

DESC = """\

@@ -40,10 +40,32 @@ def main(force_colors: bool) -> None:
    exec(r, locals)
    current_schema_version = locals["SCHEMA_VERSION"]

    click.secho(f"Current schema version: {current_schema_version}")

    diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)

    # Get the schema version of the local file to check against current schema on develop
    with open("synapse/storage/schema/__init__.py", "r") as file:
        local_schema = file.read()
    new_locals: Dict[str, Any] = {}
    exec(local_schema, new_locals)
    local_schema_version = new_locals["SCHEMA_VERSION"]

    if local_schema_version != current_schema_version:
        # local schema version must be +/-1 the current schema version on develop
        if abs(local_schema_version - current_schema_version) != 1:
            click.secho(
                "The proposed schema version has diverged more than one version from develop, please fix!",
                fg="red",
                bold=True,
                color=force_colors,
            )
            click.get_current_context().exit(1)

        # right, we've changed the schema version within the allowable tolerance so
        # let's now use the local version as the canonical version
        current_schema_version = local_schema_version

    click.secho(f"Current schema version: {current_schema_version}")

    seen_deltas = False
    bad_files = []
    for diff in diffs:
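
The version tolerance enforced above, in isolation (an illustrative helper, not part of the script): the local `SCHEMA_VERSION` may differ from develop's by at most one.

```python
def schema_delta_ok(local_version: int, develop_version: int) -> bool:
    """True if the local schema version is within one of develop's."""
    return abs(local_version - develop_version) <= 1

assert schema_delta_ok(75, 75)
assert schema_delta_ok(75, 74)
assert not schema_delta_ok(76, 74)
```
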
@@ -11,6 +11,11 @@
# filepath of a local Complement checkout or by setting the COMPLEMENT_REF
# environment variable to pull a different branch or commit.
#
# To use the 'podman' command instead of 'docker', set the PODMAN environment
# variable. Example:
#
#   PODMAN=1 ./complement.sh
#
# By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable.
#
@@ -30,7 +35,6 @@
# Exit if a line returns a non-zero exit code
set -e


# Helper to emit annotations that collapse portions of the log in GitHub Actions
echo_if_github() {
  if [[ -n "$GITHUB_WORKFLOW" ]]; then
@@ -100,6 +104,16 @@ done
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1

# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
    export CONTAINER_RUNTIME=podman
    export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
    export BUILDAH_FORMAT=docker
    export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
    export CONTAINER_RUNTIME=docker
fi

# Change to the repository root
cd "$(dirname $0)/.."

@@ -126,16 +140,16 @@ if [ -n "$use_editable_synapse" ]; then
    editable_mount="$(realpath .):/editable-src:z"
    if [ -n "$rebuild_editable_synapse" ]; then
        unset skip_docker_build
    elif docker inspect complement-synapse-editable &>/dev/null; then
    elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
        # complement-synapse-editable already exists: see if we can still use it:
        # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
        # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)

        # First set up the module in the right place for an editable installation.
        docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
        $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

        if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
            && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
        if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
            && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
            skip_docker_build=1
        else
            echo "Editable Synapse image is stale. Will rebuild."
@@ -149,25 +163,25 @@ if [ -z "$skip_docker_build" ]; then

    # Build a special image designed for use in development with editable
    # installs.
    docker build -t synapse-editable \
    $CONTAINER_RUNTIME build -t synapse-editable \
        -f "docker/editable.Dockerfile" .

    docker build -t synapse-workers-editable \
    $CONTAINER_RUNTIME build -t synapse-workers-editable \
        --build-arg FROM=synapse-editable \
        -f "docker/Dockerfile-workers" .

    docker build -t complement-synapse-editable \
    $CONTAINER_RUNTIME build -t complement-synapse-editable \
        --build-arg FROM=synapse-workers-editable \
        -f "docker/complement/Dockerfile" "docker/complement"

    # Prepare the Rust module
    docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
    $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

else

    # Build the base Synapse image from the local checkout
    echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
    docker build -t matrixdotorg/synapse \
    $CONTAINER_RUNTIME build -t matrixdotorg/synapse \
        --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
        --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
        -f "docker/Dockerfile" .
@@ -175,12 +189,12 @@ if [ -z "$skip_docker_build" ]; then

    # Build the workers docker image (from the base Synapse image we just built).
    echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
    docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
    $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
    echo_if_github "::endgroup::"

    # Build the unified Complement image (from the worker Synapse image we just built).
    echo_if_github "::group::Build Docker image: complement/Dockerfile"
    docker build -t complement-synapse \
    $CONTAINER_RUNTIME build -t complement-synapse \
        -f "docker/complement/Dockerfile" "docker/complement"
    echo_if_github "::endgroup::"

@@ -91,6 +91,7 @@ else
        "synapse" "docker" "tests"
        "scripts-dev"
        "contrib" "synmark" "stubs" ".ci"
        "dev-docs"
    )
fi
fi

@@ -280,7 +280,7 @@ def _prepare() -> None:
    )

    print("Opening the changelog in your browser...")
    print("Please ask others to give it a check.")
    print("Please ask #synapse-dev to give it a check.")
    click.launch(
        f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md"
    )

@@ -1,39 +0,0 @@
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Stub for frozendict.

from __future__ import annotations

from typing import Any, Hashable, Iterable, Iterator, Mapping, Tuple, TypeVar, overload

_KT = TypeVar("_KT", bound=Hashable)  # Key type.
_VT = TypeVar("_VT")  # Value type.

class frozendict(Mapping[_KT, _VT]):
    @overload
    def __init__(self, **kwargs: _VT) -> None: ...
    @overload
    def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    @overload
    def __init__(
        self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    def __getitem__(self, key: _KT) -> _VT: ...
    def __contains__(self, key: Any) -> bool: ...
    def copy(self, **add_or_replace: Any) -> frozendict: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def __len__(self) -> int: ...
    def __repr__(self) -> str: ...
    def __hash__(self) -> int: ...
@@ -17,9 +17,9 @@
""" This is an implementation of a Matrix homeserver.
"""

import json
import os
import sys
from typing import Any, Dict

from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool
@@ -61,11 +61,20 @@ try:
except ImportError:
    pass

# Use the standard library json implementation instead of simplejson.
# Teach canonicaljson how to serialise immutabledicts.
try:
    from canonicaljson import set_json_library
    from canonicaljson import register_preserialisation_callback
    from immutabledict import immutabledict

    set_json_library(json)
    def _immutabledict_cb(d: immutabledict) -> Dict[str, Any]:
        try:
            return d._dict
        except Exception:
            # Paranoia: fall back to a `dict()` call, in case a future version of
            # immutabledict removes `_dict` from the implementation.
            return dict(d)

    register_preserialisation_callback(immutabledict, _immutabledict_cb)
except ImportError:
    pass
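
What the callback registration above enables, as a standalone sketch (assumes canonicaljson >= 2.0 and immutabledict are installed, as the diff requires):

```python
from typing import Any, Dict

from canonicaljson import encode_canonical_json, register_preserialisation_callback
from immutabledict import immutabledict

def _cb(d: immutabledict) -> Dict[str, Any]:
    return dict(d)

register_preserialisation_callback(immutabledict, _cb)

frozen = immutabledict({"msgtype": "m.text", "body": "hello"})
# canonicaljson can now serialise the frozen mapping directly:
print(encode_canonical_json(frozen))  # b'{"body":"hello","msgtype":"m.text"}'
```
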

302  synapse/_scripts/generate_workers_map.py  Executable file
@@ -0,0 +1,302 @@
#!/usr/bin/env python
# Copyright 2022-2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, Optional, Pattern, Set, Tuple

import yaml

from synapse.config.homeserver import HomeServerConfig
from synapse.federation.transport.server import (
    TransportLayerServer,
    register_servlets as register_federation_servlets,
)
from synapse.http.server import HttpServer, ServletCallback
from synapse.rest import ClientRestResource
from synapse.rest.key.v2 import RemoteKey
from synapse.server import HomeServer
from synapse.storage import DataStore

logger = logging.getLogger("generate_workers_map")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore

    def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
        super().__init__(config.server.server_name, config=config)
        self.config.worker.worker_app = worker_app


GROUP_PATTERN = re.compile(r"\(\?P<[^>]+?>(.+?)\)")


@dataclass
class EndpointDescription:
    """
    Describes an endpoint and how it should be routed.
    """

    # The servlet class that handles this endpoint
    servlet_class: object

    # The category of this endpoint. Is read from the `CATEGORY` constant in the servlet
    # class.
    category: Optional[str]

    # TODO:
    #   - does it need to be routed based on a stream writer config?
    #   - does it benefit from any optimised, but optional, routing?
    #   - what 'opinionated synapse worker class' (event_creator, synchrotron, etc) does
    #     it go in?


class EnumerationResource(HttpServer):
    """
    Accepts servlet registrations for the purposes of building up a description of
    all endpoints.
    """

    def __init__(self, is_worker: bool) -> None:
        self.registrations: Dict[Tuple[str, str], EndpointDescription] = {}
        self._is_worker = is_worker

    def register_paths(
        self,
        method: str,
        path_patterns: Iterable[Pattern],
        callback: ServletCallback,
        servlet_classname: str,
    ) -> None:
        # federation servlet callbacks are wrapped, so unwrap them.
        callback = getattr(callback, "__wrapped__", callback)

        # fish out the servlet class
        servlet_class = callback.__self__.__class__  # type: ignore

        if self._is_worker and method in getattr(
            servlet_class, "WORKERS_DENIED_METHODS", ()
        ):
            # This endpoint would cause an error if called on a worker, so pretend it
            # was never registered!
            return

        sd = EndpointDescription(
            servlet_class=servlet_class,
            category=getattr(servlet_class, "CATEGORY", None),
        )

        for pat in path_patterns:
            self.registrations[(method, pat.pattern)] = sd


def get_registered_paths_for_hs(
    hs: HomeServer,
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Given a homeserver, get all registered endpoints and their descriptions.
    """

    enumerator = EnumerationResource(is_worker=hs.config.worker.worker_app is not None)
    ClientRestResource.register_servlets(enumerator, hs)
    federation_server = TransportLayerServer(hs)

    # we can't use `federation_server.register_servlets` but this line does the
    # same thing, only it uses this enumerator
    register_federation_servlets(
        federation_server.hs,
        resource=enumerator,
        ratelimiter=federation_server.ratelimiter,
        authenticator=federation_server.authenticator,
        servlet_groups=federation_server.servlet_groups,
    )

    # the key server endpoints are separate again
    RemoteKey(hs).register(enumerator)

    return enumerator.registrations


def get_registered_paths_for_default(
    worker_app: Optional[str], base_config: HomeServerConfig
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Given the name of a worker application and a base homeserver configuration,
    returns:

        Dict from (method, path) to EndpointDescription

    TODO Don't require passing in a config
    """

    hs = MockHomeserver(base_config, worker_app)
    # TODO We only do this to avoid an error, but don't need the database etc
    hs.setup()
    return get_registered_paths_for_hs(hs)


def elide_http_methods_if_unconflicting(
    registrations: Dict[Tuple[str, str], EndpointDescription],
    all_possible_registrations: Dict[Tuple[str, str], EndpointDescription],
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Elides HTTP methods (by replacing them with `*`) if all possible registered methods
    can be handled by the worker whose registration map is `registrations`.

    i.e. the only endpoints left with methods (other than `*`) should be the ones where
    the worker can't handle all possible methods for that path.
    """

    def paths_to_methods_dict(
        methods_and_paths: Iterable[Tuple[str, str]]
    ) -> Dict[str, Set[str]]:
        """
        Given (method, path) pairs, produces a dict from path to set of methods
        available at that path.
        """
        result: Dict[str, Set[str]] = {}
        for method, path in methods_and_paths:
            result.setdefault(path, set()).add(method)
        return result

    all_possible_reg_methods = paths_to_methods_dict(all_possible_registrations)
    reg_methods = paths_to_methods_dict(registrations)

    output = {}

    for path, handleable_methods in reg_methods.items():
        if handleable_methods == all_possible_reg_methods[path]:
            any_method = next(iter(handleable_methods))
            # TODO This assumes that all methods have the same servlet.
            #      I suppose that's possibly dubious?
            output[("*", path)] = registrations[(any_method, path)]
        else:
            for method in handleable_methods:
                output[(method, path)] = registrations[(method, path)]

    return output


def simplify_path_regexes(
    registrations: Dict[Tuple[str, str], EndpointDescription]
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Simplify all the path regexes for the dict of endpoint descriptions,
    so that we don't use the Python-specific regex extensions
    (and also to remove needlessly specific detail).
    """

    def simplify_path_regex(path: str) -> str:
        """
        Given a regex pattern, replaces all named capturing groups (e.g. `(?P<blah>xyz)`)
        with a simpler version available in more common regex dialects (e.g. `.*`).
        """

        # TODO it's hard to choose between these two;
        #      `.*` is a vague simplification
        # return GROUP_PATTERN.sub(r"\1", path)
        return GROUP_PATTERN.sub(r".*", path)

    return {(m, simplify_path_regex(p)): v for (m, p), v in registrations.items()}


def main() -> None:
    parser = argparse.ArgumentParser(
        description=(
            "Updates a synapse database to the latest schema and optionally runs background updates"
            " on it."
        )
    )
    parser.add_argument("-v", action="store_true")
    parser.add_argument(
        "--config-path",
        type=argparse.FileType("r"),
        required=True,
        help="Synapse configuration file",
    )

    args = parser.parse_args()

    # TODO
    # logging.basicConfig(**logging_config)

    # Load, process and sanity-check the config.
    hs_config = yaml.safe_load(args.config_path)

    config = HomeServerConfig()
    config.parse_config_dict(hs_config, "", "")

    master_paths = get_registered_paths_for_default(None, config)
    worker_paths = get_registered_paths_for_default(
        "synapse.app.generic_worker", config
    )

    all_paths = {**master_paths, **worker_paths}

    elided_worker_paths = elide_http_methods_if_unconflicting(worker_paths, all_paths)
    elide_http_methods_if_unconflicting(master_paths, all_paths)

    # TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT

    categories_to_methods_and_paths: Dict[
        Optional[str], Dict[Tuple[str, str], EndpointDescription]
    ] = defaultdict(dict)

    for (method, path), desc in elided_worker_paths.items():
        categories_to_methods_and_paths[desc.category][method, path] = desc

    for category, contents in categories_to_methods_and_paths.items():
        print_category(category, contents)


def print_category(
    category_name: Optional[str],
    elided_worker_paths: Dict[Tuple[str, str], EndpointDescription],
) -> None:
    """
    Prints out a category, in documentation page style.

    Example:
    ```
    # Category name
    /path/xyz

    GET /path/abc
    ```
    """

    if category_name:
        print(f"# {category_name}")
    else:
        print("# (Uncategorised requests)")

    for ln in sorted(
        p for m, p in simplify_path_regexes(elided_worker_paths) if m == "*"
    ):
        print(ln)
    print()
    for ln in sorted(
        f"{m:6} {p}" for m, p in simplify_path_regexes(elided_worker_paths) if m != "*"
    ):
        print(ln)
    print()


if __name__ == "__main__":
    main()
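
To make the `GROUP_PATTERN` substitution in `simplify_path_regexes` concrete, a quick standalone illustration (the path below is a made-up example):

```python
import re

# Same pattern as in the script above: matches named capturing groups.
GROUP_PATTERN = re.compile(r"\(\?P<[^>]+?>(.+?)\)")

path = r"^/_matrix/client/(r0|v3|unstable)/rooms/(?P<room_id>[^/]*)/state$"
print(GROUP_PATTERN.sub(r".*", path))
# -> ^/_matrix/client/(r0|v3|unstable)/rooms/.*/state$
```
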
@@ -18,6 +18,7 @@
import argparse
import curses
import logging
import os
import sys
import time
import traceback
@@ -53,11 +54,12 @@ from synapse.logging.context import (
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import PushRuleStore
from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
@@ -67,7 +69,11 @@ from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.pusher import (
    PusherBackgroundUpdatesStore,
    PusherWorkerStore,
)
from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
@@ -119,6 +125,7 @@ BOOLEAN_COLUMNS = {
    "users": ["shadow_banned", "approved"],
    "un_partial_stated_event_stream": ["rejection_status_changed"],
    "users_who_share_rooms": ["share_private"],
    "per_user_experimental_features": ["enabled"],
}

@@ -221,10 +228,14 @@ class Store(
    MainStateBackgroundUpdateStore,
    UserDirectoryBackgroundUpdateStore,
    EndToEndKeyBackgroundStore,
    EndToEndRoomKeyBackgroundStore,
    StatsStore,
    AccountDataWorkerStore,
    FilteringWorkerStore,
    ProfileWorkerStore,
    PushRuleStore,
    PusherWorkerStore,
    PusherBackgroundUpdatesStore,
    PresenceBackgroundUpdateStore,
    ReceiptsBackgroundUpdateStore,
    RelationsWorkerStore,
@@ -1326,10 +1337,17 @@ def main() -> None:
        filename="port-synapse.log" if args.curses else None,
    )

    if not os.path.isfile(args.sqlite_database):
        sys.stderr.write(
            "The sqlite database you specified does not exist, please check that you have the "
            "correct path."
        )
        sys.exit(1)

    sqlite_config = {
        "name": "sqlite3",
        "args": {
            "database": "file:{}?mode=rw".format(args.sqlite_database),
            "database": args.sqlite_database,
            "cp_min": 1,
            "cp_max": 1,
            "check_same_thread": False,

@@ -39,7 +39,7 @@ class AuthBlocking:
        self._mau_limits_reserved_threepids = (
            hs.config.server.mau_limits_reserved_threepids
        )
        self._server_name = hs.hostname
        self._is_mine_server_name = hs.is_mine_server_name
        self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips

    async def check_auth_blocking(
@@ -77,7 +77,7 @@ class AuthBlocking:
        if requester:
            if requester.authenticated_entity.startswith("@"):
                user_id = requester.authenticated_entity
            elif requester.authenticated_entity == self._server_name:
            elif self._is_mine_server_name(requester.authenticated_entity):
                # We never block the server from doing actions on behalf of
                # users.
                return

@@ -215,6 +215,8 @@ class EventContentFields:
    FEDERATE: Final = "m.federate"

    # The creator of the room, as used in `m.room.create` events.
    #
    # This is deprecated in MSC2175.
    ROOM_CREATOR: Final = "creator"

    # Used in m.room.guest_access events.
@@ -255,6 +257,7 @@ class AccountDataTypes:
    DIRECT: Final = "m.direct"
    IGNORED_USER_LIST: Final = "m.ignored_user_list"
    TAG: Final = "m.tag"
    PUSH_RULES: Final = "m.push_rules"


class HistoryVisibility:

@@ -27,7 +27,7 @@ from synapse.util import json_decoder

if typing.TYPE_CHECKING:
    from synapse.config.homeserver import HomeServerConfig
    from synapse.types import JsonDict
    from synapse.types import JsonDict, StrCollection

logger = logging.getLogger(__name__)

@@ -108,6 +108,11 @@ class Codes(str, Enum):

    USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"

    AS_PING_URL_NOT_SET = "M_URL_NOT_SET"
    AS_PING_BAD_STATUS = "M_BAD_STATUS"
    AS_PING_CONNECTION_TIMEOUT = "M_CONNECTION_TIMEOUT"
    AS_PING_CONNECTION_FAILED = "M_CONNECTION_FAILED"

    # Attempt to send a second annotation with the same event type & annotation key
    # MSC2677
    DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
@@ -677,18 +682,27 @@ class FederationPullAttemptBackoffError(RuntimeError):
    Attributes:
        event_id: The event_id which we are refusing to pull
        message: A custom error message that gives more context
        retry_after_ms: The remaining backoff interval, in milliseconds
    """

    def __init__(self, event_ids: List[str], message: Optional[str]):
        self.event_ids = event_ids
    def __init__(
        self, event_ids: "StrCollection", message: Optional[str], retry_after_ms: int
    ):
        event_ids = list(event_ids)

        if message:
            error_message = message
        else:
            error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
            error_message = (
                f"Not attempting to pull event_ids={event_ids} because we already "
                "tried to pull them recently (backing off)."
            )

        super().__init__(error_message)

        self.event_ids = event_ids
        self.retry_after_ms = retry_after_ms


class HttpResponseException(CodeMessageException):
    """
@@ -170,11 +170,9 @@ class Filtering:
        result = await self.store.get_user_filter(user_localpart, filter_id)
        return FilterCollection(self._hs, result)

    def add_user_filter(
        self, user_localpart: str, user_filter: JsonDict
    ) -> Awaitable[int]:
    def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]:
        self.check_valid_filter(user_filter)
        return self.store.add_user_filter(user_localpart, user_filter)
        return self.store.add_user_filter(user_id, user_filter)

    # TODO(paul): surely we should probably add a delete_user_filter or
    # replace_user_filter at some point? There's no REST API specified for

@@ -78,7 +78,10 @@ class RoomVersion:
    # MSC2209: Check 'notifications' key while verifying
    # m.room.power_levels auth rules.
    limit_notifications_power_levels: bool
    # MSC2174/MSC2176: Apply updated redaction rules algorithm.
    # MSC2175: No longer include the creator in m.room.create events.
    msc2175_implicit_room_creator: bool
    # MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
    # content property.
    msc2176_redaction_rules: bool
    # MSC3083: Support the 'restricted' join_rule.
    msc3083_join_rules: bool
@@ -93,17 +96,23 @@ class RoomVersion:
    msc2716_historical: bool
    # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
    msc2716_redactions: bool
    # MSC3389: Protect relation information from redaction.
    msc3389_relation_redactions: bool
    # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
    # knocks and restricted join rules into the same join condition.
    msc3787_knock_restricted_join_rule: bool
    # MSC3667: Enforce integer power levels
    msc3667_int_only_power_levels: bool
    # MSC3821: Do not redact the third_party_invite content field for membership events.
    msc3821_redaction_rules: bool
    # MSC3931: Adds a push rule condition for "room version feature flags", making
    # some push rules room version dependent. Note that adding a flag to this list
    # is not enough to mark it "supported": the push rule evaluator also needs to
    # support the flag. Unknown flags are ignored by the evaluator, making conditions
    # fail if used.
    msc3931_push_features: Tuple[str, ...]  # values from PushRuleRoomFlag
    # MSC3989: Redact the origin field.
    msc3989_redaction_rules: bool


class RoomVersions:
@@ -116,15 +125,19 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V2 = RoomVersion(
        "2",
@@ -135,15 +148,19 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V3 = RoomVersion(
        "3",
@@ -154,15 +171,19 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V4 = RoomVersion(
        "4",
@@ -173,15 +194,19 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V5 = RoomVersion(
        "5",
@@ -192,15 +217,19 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V6 = RoomVersion(
        "6",
@@ -211,15 +240,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@@ -230,15 +263,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=True,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V7 = RoomVersion(
        "7",
@@ -249,15 +286,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V8 = RoomVersion(
        "8",
@@ -268,15 +309,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V9 = RoomVersion(
        "9",
@@ -287,15 +332,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
@@ -306,15 +355,42 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC3821 = RoomVersion(
        "org.matrix.msc3821.opt1",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=True,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V10 = RoomVersion(
        "10",
@@ -325,15 +401,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC2716v4 = RoomVersion(
        "org.matrix.msc2716v4",
@@ -344,15 +424,19 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        msc2716_historical=True,
        msc2716_redactions=True,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC1767v10 = RoomVersion(
        # MSC1767 (Extensible Events) based on room version "10"
@@ -364,15 +448,42 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3821_redaction_rules=False,
        msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
        msc3989_redaction_rules=False,
    )
    MSC3989 = RoomVersion(
        "org.matrix.msc3989",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3389_relation_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3821_redaction_rules=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=True,
    )


@@ -392,6 +503,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
        RoomVersions.MSC3787,
        RoomVersions.V10,
        RoomVersions.MSC2716v4,
        RoomVersions.MSC3989,
    )
}
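
How the registry extension above is typically consumed (a sketch; the lookup pattern is standard, and the printed flag comes from the MSC3989 entry added above):

```python
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

room_version = KNOWN_ROOM_VERSIONS.get("org.matrix.msc3989")
if room_version is not None:
    # The new per-version flag gates the updated redaction algorithm.
    print(room_version.msc3989_redaction_rules)  # True
```
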
@@ -21,6 +21,7 @@ import socket
import sys
import traceback
import warnings
from textwrap import indent
from typing import (
    TYPE_CHECKING,
    Any,
@@ -41,7 +42,12 @@ from typing_extensions import ParamSpec

import twisted
from twisted.internet import defer, error, reactor as _reactor
from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorSSL, IReactorTCP
from twisted.internet.interfaces import (
    IOpenSSLContextFactory,
    IReactorSSL,
    IReactorTCP,
    IReactorUNIX,
)
from twisted.internet.protocol import ServerFactory
from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
@@ -56,11 +62,9 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
@@ -68,6 +72,10 @@ from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
    load_legacy_third_party_event_rules,
)
from synapse.types import ISynapseReactor
from synapse.util import SYNAPSE_VERSION
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
@@ -205,8 +213,12 @@ def handle_startup_exception(e: Exception) -> NoReturn:
    # Exceptions that occur between setting up the logging and forking or starting
    # the reactor are written to the logs, followed by a summary to stderr.
    logger.exception("Exception during startup")

    error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__))
    indented_error_string = indent(error_string, "    ")

    quit_with_error(
        f"Error during initialisation:\n {e}\nThere may be more information in the logs."
        f"Error during initialisation:\n{indented_error_string}\nThere may be more information in the logs."
    )


@@ -351,6 +363,28 @@ def listen_tcp(
    return r  # type: ignore[return-value]


def listen_unix(
    path: str,
    mode: int,
    factory: ServerFactory,
    reactor: IReactorUNIX = reactor,
    backlog: int = 50,
) -> List[Port]:
    """
    Create a UNIX socket for a given path and 'mode' permission

    Returns:
        list of twisted.internet.tcp.Port listening on the UNIX socket
    """
    wantPID = True

    return [
        # IReactorUNIX returns an object implementing IListeningPort from listenUNIX,
        # but we know it will be a Port instance.
        cast(Port, reactor.listenUNIX(path, factory, backlog, mode, wantPID))
    ]


def listen_http(
    listener_config: ListenerConfig,
    root_resource: Resource,
@@ -359,18 +393,13 @@ def listen_http(
    context_factory: Optional[IOpenSSLContextFactory],
    reactor: ISynapseReactor = reactor,
) -> List[Port]:
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls

    assert listener_config.http_options is not None

    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)
    site_tag = listener_config.get_site_tag()

    site = SynapseSite(
        "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
        "synapse.access.%s.%s"
        % ("https" if listener_config.is_tls() else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
@@ -378,25 +407,41 @@ def listen_http(
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )
    if tls:
        # refresh_certificate should have been called before this.
        assert context_factory is not None
        ports = listen_ssl(
            bind_addresses,
            port,
            site,
            context_factory,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)

    if isinstance(listener_config, TCPListenerConfig):
        if listener_config.is_tls():
            # refresh_certificate should have been called before this.
            assert context_factory is not None
            ports = listen_ssl(
                listener_config.bind_addresses,
                listener_config.port,
                site,
                context_factory,
                reactor=reactor,
            )
            logger.info(
                "Synapse now listening on TCP port %d (TLS)", listener_config.port
            )
        else:
            ports = listen_tcp(
                listener_config.bind_addresses,
                listener_config.port,
                site,
                reactor=reactor,
            )
            logger.info("Synapse now listening on TCP port %d", listener_config.port)

    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            site,
            reactor=reactor,
        ports = listen_unix(
            listener_config.path, listener_config.mode, site, reactor=reactor
        )
        logger.info("Synapse now listening on TCP port %d", port)
        # getHost() returns a UNIXAddress which contains an instance variable of 'name'
        # encoded as a byte string. Decode as utf-8 for readable logging.
        logger.info(
            "Synapse now listening on Unix Socket at: "
            f"{ports[0].getHost().name.decode('utf-8')}"
        )

    return ports

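As a rough illustration of what listen_unix boils down to: Twisted's reactor.listenUNIX binds a socket file and returns a listening port. A self-contained sketch, standalone and not Synapse code (the socket path and protocol are made up):

# --- illustrative sketch, not part of the diff ---
from twisted.internet import protocol, reactor

class Echo(protocol.Protocol):
    """Toy protocol: echo whatever the client sends."""
    def dataReceived(self, data: bytes) -> None:
        self.transport.write(data)

factory = protocol.Factory.forProtocol(Echo)

# Bind a UNIX socket at /tmp/echo.sock with 0o666 permissions, mirroring the
# (path, factory, backlog, mode, wantPID) call made by listen_unix above.
port = reactor.listenUNIX(
    "/tmp/echo.sock", factory, backlog=50, mode=0o666, wantPID=True
)
print("listening on", port.getHost().name)
reactor.run()
# --- end sketch ---
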
@@ -38,7 +38,7 @@ from synapse.app._base import (
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.config.server import ListenerConfig, TCPListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext
@@ -236,12 +236,18 @@ class GenericWorkerServer(HomeServer):
            if listener.type == "http":
                self._listen_http(listener)
            elif listener.type == "manhole":
                _base.listen_manhole(
                    listener.bind_addresses,
                    listener.port,
                    manhole_settings=self.config.server.manhole_settings,
                    manhole_globals={"hs": self},
                )
                if isinstance(listener, TCPListenerConfig):
                    _base.listen_manhole(
                        listener.bind_addresses,
                        listener.port,
                        manhole_settings=self.config.server.manhole_settings,
                        manhole_globals={"hs": self},
                    )
                else:
                    raise ConfigError(
                        "Can not use a unix socket for manhole at this time."
                    )

            elif listener.type == "metrics":
                if not self.config.metrics.enable_metrics:
                    logger.warning(
@@ -249,10 +255,16 @@ class GenericWorkerServer(HomeServer):
                        "enable_metrics is not True!"
                    )
                else:
                    _base.listen_metrics(
                        listener.bind_addresses,
                        listener.port,
                    )
                    if isinstance(listener, TCPListenerConfig):
                        _base.listen_metrics(
                            listener.bind_addresses,
                            listener.port,
                        )
                    else:
                        raise ConfigError(
                            "Can not use a unix socket for metrics at this time."
                        )

            else:
                logger.warning("Unsupported listener type: %s", listener.type)

@@ -44,7 +44,7 @@ from synapse.app._base import (
)
from synapse.config._base import ConfigError, format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig
from synapse.config.server import ListenerConfig, TCPListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import (
@@ -78,14 +78,13 @@ class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore  # type: ignore

    def _listener_http(
        self, config: HomeServerConfig, listener_config: ListenerConfig
        self,
        config: HomeServerConfig,
        listener_config: ListenerConfig,
    ) -> Iterable[Port]:
        port = listener_config.port
        # Must exist since this is an HTTP listener.
        assert listener_config.http_options is not None
        site_tag = listener_config.http_options.tag
        if site_tag is None:
            site_tag = str(port)
        site_tag = listener_config.get_site_tag()

        # We always include a health resource.
        resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -252,12 +251,17 @@ class SynapseHomeServer(HomeServer):
                    self._listener_http(self.config, listener)
                )
            elif listener.type == "manhole":
                _base.listen_manhole(
                    listener.bind_addresses,
                    listener.port,
                    manhole_settings=self.config.server.manhole_settings,
                    manhole_globals={"hs": self},
                )
                if isinstance(listener, TCPListenerConfig):
                    _base.listen_manhole(
                        listener.bind_addresses,
                        listener.port,
                        manhole_settings=self.config.server.manhole_settings,
                        manhole_globals={"hs": self},
                    )
                else:
                    raise ConfigError(
                        "Can not use a unix socket for manhole at this time."
                    )
            elif listener.type == "metrics":
                if not self.config.metrics.enable_metrics:
                    logger.warning(
@@ -265,10 +269,16 @@ class SynapseHomeServer(HomeServer):
                        "enable_metrics is not True!"
                    )
                else:
                    _base.listen_metrics(
                        listener.bind_addresses,
                        listener.port,
                    )
                    if isinstance(listener, TCPListenerConfig):
                        _base.listen_metrics(
                            listener.bind_addresses,
                            listener.port,
                        )
                    else:
                        raise ConfigError(
                            "Can not use a unix socket for metrics at this time."
                        )

            else:
                # this shouldn't happen, as the listener type should have been checked
                # during parsing

@@ -17,6 +17,8 @@ import urllib.parse
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
@@ -24,13 +26,14 @@ from typing import (
    Optional,
    Sequence,
    Tuple,
    TypeVar,
)

from prometheus_client import Counter
from typing_extensions import TypeGuard
from typing_extensions import Concatenate, ParamSpec, TypeGuard

from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.api.errors import CodeMessageException, HttpResponseException
from synapse.appservice import (
    ApplicationService,
    TransactionOneTimeKeysCount,
@@ -38,7 +41,7 @@ from synapse.appservice import (
)
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig, serialize_event
from synapse.http.client import SimpleHttpClient
from synapse.http.client import SimpleHttpClient, is_unknown_endpoint
from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
from synapse.util.caches.response_cache import ResponseCache

@@ -78,7 +81,11 @@ sent_todevice_counter = Counter(
HOUR_IN_MS = 60 * 60 * 1000


APP_SERVICE_PREFIX = "/_matrix/app/unstable"
APP_SERVICE_PREFIX = "/_matrix/app/v1"
APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"

P = ParamSpec("P")
R = TypeVar("R")


def _is_valid_3pe_metadata(info: JsonDict) -> bool:
@@ -121,6 +128,47 @@ class ApplicationServiceApi(SimpleHttpClient):
            hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )

    async def _send_with_fallbacks(
        self,
        service: "ApplicationService",
        prefixes: List[str],
        path: str,
        func: Callable[Concatenate[str, P], Awaitable[R]],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> R:
        """
        Attempt to call an application service with multiple paths, falling back
        until one succeeds.

        Args:
            service: The application service; this provides the base URL.
            prefixes: A list of paths to try in order for the requests.
            path: A suffix to append to each prefix.
            func: The function to call, the first argument will be the full
                endpoint to fetch. Other arguments are provided by args/kwargs.

        Returns:
            The return value of func.
        """
        for i, prefix in enumerate(prefixes, start=1):
            uri = f"{service.url}{prefix}{path}"
            try:
                return await func(uri, *args, **kwargs)
            except HttpResponseException as e:
                # If an error is received that is due to an unrecognised path,
                # fallback to next path (if one exists). Otherwise, consider it
                # a legitimate error and raise.
                if i < len(prefixes) and is_unknown_endpoint(e):
                    continue
                raise
            except Exception:
                # Unexpected exceptions get sent to the caller.
                raise

        # The function should always exit via the return or raise above this.
        raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")

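To make the fallback behaviour concrete, a hedged sketch of a caller: the stable /_matrix/app/v1 prefix is tried first and the legacy unprefixed path is used only when is_unknown_endpoint recognises the failure. The api and service objects are assumed to be set up elsewhere:

# --- illustrative sketch, not part of the diff ---
# Assumes `api` is an ApplicationServiceApi and `service` an
# ApplicationService with `url` and `hs_token` set (as in the methods below).
async def lookup_user(api, service, user_id: str):
    # Tries "<url>/_matrix/app/v1/users/<id>" first; if the appservice
    # reports an unknown endpoint, retries "<url>/users/<id>".
    return await api._send_with_fallbacks(
        service,
        [APP_SERVICE_PREFIX, ""],  # prefixes, in preference order
        f"/users/{urllib.parse.quote(user_id)}",
        api.get_json,  # func receives the full URI as its first argument
        {"access_token": service.hs_token},
        headers={"Authorization": [f"Bearer {service.hs_token}"]},
    )
# --- end sketch ---
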
    async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
        if service.url is None:
            return False
@@ -128,10 +176,12 @@ class ApplicationServiceApi(SimpleHttpClient):
        # This is required by the configuration.
        assert service.hs_token is not None

        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
        try:
            response = await self.get_json(
                uri,
            response = await self._send_with_fallbacks(
                service,
                [APP_SERVICE_PREFIX, ""],
                f"/users/{urllib.parse.quote(user_id)}",
                self.get_json,
                {"access_token": service.hs_token},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
@@ -140,9 +190,9 @@ class ApplicationServiceApi(SimpleHttpClient):
        except CodeMessageException as e:
            if e.code == 404:
                return False
            logger.warning("query_user to %s received %s", uri, e.code)
            logger.warning("query_user to %s received %s", service.url, e.code)
        except Exception as ex:
            logger.warning("query_user to %s threw exception %s", uri, ex)
            logger.warning("query_user to %s threw exception %s", service.url, ex)
        return False

    async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
@@ -152,21 +202,23 @@ class ApplicationServiceApi(SimpleHttpClient):
        # This is required by the configuration.
        assert service.hs_token is not None

        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
        try:
            response = await self.get_json(
                uri,
            response = await self._send_with_fallbacks(
                service,
                [APP_SERVICE_PREFIX, ""],
                f"/rooms/{urllib.parse.quote(alias)}",
                self.get_json,
                {"access_token": service.hs_token},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
            if response is not None:  # just an empty json object
                return True
        except CodeMessageException as e:
            logger.warning("query_alias to %s received %s", uri, e.code)
            logger.warning("query_alias to %s received %s", service.url, e.code)
            if e.code == 404:
                return False
        except Exception as ex:
            logger.warning("query_alias to %s threw exception %s", uri, ex)
            logger.warning("query_alias to %s threw exception %s", service.url, ex)
        return False

    async def query_3pe(
@@ -188,25 +240,24 @@ class ApplicationServiceApi(SimpleHttpClient):
        # This is required by the configuration.
        assert service.hs_token is not None

        uri = "%s%s/thirdparty/%s/%s" % (
            service.url,
            APP_SERVICE_PREFIX,
            kind,
            urllib.parse.quote(protocol),
        )
        try:
            args: Mapping[Any, Any] = {
                **fields,
                b"access_token": service.hs_token,
            }
            response = await self.get_json(
                uri,
            response = await self._send_with_fallbacks(
                service,
                [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
                f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
                self.get_json,
                args=args,
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
            if not isinstance(response, list):
                logger.warning(
                    "query_3pe to %s returned an invalid response %r", uri, response
                    "query_3pe to %s returned an invalid response %r",
                    service.url,
                    response,
                )
                return []

@@ -216,12 +267,12 @@ class ApplicationServiceApi(SimpleHttpClient):
                    ret.append(r)
                else:
                    logger.warning(
                        "query_3pe to %s returned an invalid result %r", uri, r
                        "query_3pe to %s returned an invalid result %r", service.url, r
                    )

            return ret
        except Exception as ex:
            logger.warning("query_3pe to %s threw exception %s", uri, ex)
            logger.warning("query_3pe to %s threw exception %s", service.url, ex)
            return []

    async def get_3pe_protocol(
@@ -233,21 +284,20 @@ class ApplicationServiceApi(SimpleHttpClient):
        async def _get() -> Optional[JsonDict]:
            # This is required by the configuration.
            assert service.hs_token is not None
            uri = "%s%s/thirdparty/protocol/%s" % (
                service.url,
                APP_SERVICE_PREFIX,
                urllib.parse.quote(protocol),
            )
            try:
                info = await self.get_json(
                    uri,
                info = await self._send_with_fallbacks(
                    service,
                    [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
                    f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
                    self.get_json,
                    {"access_token": service.hs_token},
                    headers={"Authorization": [f"Bearer {service.hs_token}"]},
                )

                if not _is_valid_3pe_metadata(info):
                    logger.warning(
                        "query_3pe_protocol to %s did not return a valid result", uri
                        "query_3pe_protocol to %s did not return a valid result",
                        service.url,
                    )
                    return None

@@ -260,12 +310,27 @@ class ApplicationServiceApi(SimpleHttpClient):

                return info
            except Exception as ex:
                logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
                logger.warning(
                    "query_3pe_protocol to %s threw exception %s", service.url, ex
                )
                return None

        key = (service.id, protocol)
        return await self.protocol_meta_cache.wrap(key, _get)

    async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None:
        # The caller should check that url is set
        assert service.url is not None, "ping called without URL being set"

        # This is required by the configuration.
        assert service.hs_token is not None

        await self.post_json_get_json(
            uri=f"{service.url}{APP_SERVICE_PREFIX}/ping",
            post_json={"transaction_id": txn_id},
            headers={"Authorization": [f"Bearer {service.hs_token}"]},
        )

    async def push_bulk(
        self,
        service: "ApplicationService",
@@ -305,8 +370,6 @@ class ApplicationServiceApi(SimpleHttpClient):
            )
            txn_id = 0

        uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))

        # Never send ephemeral events to appservices that do not support it
        body: JsonDict = {"events": serialized_events}
        if service.supports_ephemeral:
@@ -338,8 +401,11 @@ class ApplicationServiceApi(SimpleHttpClient):
        }

        try:
            await self.put_json(
                uri=uri,
            await self._send_with_fallbacks(
                service,
                [APP_SERVICE_PREFIX, ""],
                f"/transactions/{urllib.parse.quote(str(txn_id))}",
                self.put_json,
                json_body=body,
                args={"access_token": service.hs_token},
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
@@ -347,7 +413,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "push_bulk to %s succeeded! events=%s",
                    uri,
                    service.url,
                    [event.get("event_id") for event in events],
                )
            sent_transactions_counter.labels(service.id).inc()
@@ -358,7 +424,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        except CodeMessageException as e:
            logger.warning(
                "push_bulk to %s received code=%s msg=%s",
                uri,
                service.url,
                e.code,
                e.msg,
                exc_info=logger.isEnabledFor(logging.DEBUG),
@@ -366,7 +432,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        except Exception as ex:
            logger.warning(
                "push_bulk to %s threw exception(%s) %s args=%s",
                uri,
                service.url,
                type(ex).__name__,
                ex,
                ex.args,
@@ -375,6 +441,121 @@ class ApplicationServiceApi(SimpleHttpClient):
            failed_transactions_counter.labels(service.id).inc()
        return False

    async def claim_client_keys(
        self, service: "ApplicationService", query: List[Tuple[str, str, str, int]]
    ) -> Tuple[
        Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
    ]:
        """Claim one time keys from an application service.

        Note that any error (including a timeout) is treated as the application
        service having no information.

        Args:
            service: The application service to query.
            query: An iterable of tuples of (user ID, device ID, algorithm, count).

        Returns:
            A tuple of:
                A map of user ID -> a map device ID -> a map of key ID -> JSON dict.

                A copy of the input which has not been fulfilled because the
                appservice doesn't support this endpoint or has not returned
                data for that tuple.
        """
        if service.url is None:
            return {}, query

        # This is required by the configuration.
        assert service.hs_token is not None

        # Create the expected payload shape.
        body: Dict[str, Dict[str, List[str]]] = {}
        for user_id, device, algorithm, count in query:
            body.setdefault(user_id, {}).setdefault(device, []).extend(
                [algorithm] * count
            )

        uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3983/keys/claim"
        try:
            response = await self.post_json_get_json(
                uri,
                body,
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
        except HttpResponseException as e:
            # The appservice doesn't support this endpoint.
            if is_unknown_endpoint(e):
                return {}, query
            logger.warning("claim_keys to %s received %s", uri, e.code)
            return {}, query
        except Exception as ex:
            logger.warning("claim_keys to %s threw exception %s", uri, ex)
            return {}, query

        # Check if the appservice fulfilled all of the queried user/device/algorithms
        # or if some are still missing.
        #
        # TODO This places a lot of faith in the response shape being correct.
        missing = []
        for user_id, device, algorithm, count in query:
            # Count the number of keys in the response for this algorithm by
            # checking which key IDs start with the algorithm. This uses that
            # True == 1 in Python to generate a count.
            response_count = sum(
                key_id.startswith(f"{algorithm}:")
                for key_id in response.get(user_id, {}).get(device, {})
            )
            count -= response_count
            # If the appservice responds with fewer keys than requested, then
            # consider the request unfulfilled.
            if count > 0:
                missing.append((user_id, device, algorithm, count))

        return response, missing

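The counting trick above relies on bool being an int subclass, so summing the startswith checks counts matching key IDs. A small self-contained illustration with made-up key IDs:

# --- illustrative sketch, not part of the diff ---
# Hypothetical response fragment for one user/device, keyed by key ID.
device_keys = {
    "signed_curve25519:AAAAHQ": {"key": "..."},
    "signed_curve25519:AAAAHg": {"key": "..."},
    "ed25519:ABCDEF": {"key": "..."},
}

algorithm = "signed_curve25519"
# True == 1, so this sums to the number of key IDs for the algorithm.
response_count = sum(
    key_id.startswith(f"{algorithm}:") for key_id in device_keys
)
assert response_count == 2

# If the caller asked for 3 keys, one request remains unfulfilled.
requested = 3
assert requested - response_count == 1
# --- end sketch ---
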
    async def query_keys(
        self, service: "ApplicationService", query: Dict[str, List[str]]
    ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
        """Query the application service for keys.

        Note that any error (including a timeout) is treated as the application
        service having no information.

        Args:
            service: The application service to query.
            query: A map of user ID -> a list of device IDs to query.

        Returns:
            A map of device_keys/master_keys/self_signing_keys/user_signing_keys:

            device_keys is a map of user ID -> a map device ID -> device info.
        """
        if service.url is None:
            return {}

        # This is required by the configuration.
        assert service.hs_token is not None

        uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3984/keys/query"
        try:
            response = await self.post_json_get_json(
                uri,
                query,
                headers={"Authorization": [f"Bearer {service.hs_token}"]},
            )
        except HttpResponseException as e:
            # The appservice doesn't support this endpoint.
            if is_unknown_endpoint(e):
                return {}
            logger.warning("query_keys to %s received %s", uri, e.code)
            return {}
        except Exception as ex:
            logger.warning("query_keys to %s threw exception %s", uri, ex)
            return {}

        return response

    def _serialize(
        self, service: "ApplicationService", events: Iterable[EventBase]
    ) -> List[JsonDict]:

@@ -11,9 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Iterable
from typing import Any, Dict, Iterable, Type, TypeVar

import jsonschema
from pydantic import BaseModel, ValidationError, parse_obj_as

from synapse.config._base import ConfigError
from synapse.types import JsonDict
@@ -64,3 +65,28 @@ def json_error_to_config_error(
        else:
            path.append(str(p))
    return ConfigError(e.message, path)


Model = TypeVar("Model", bound=BaseModel)


def parse_and_validate_mapping(
    config: Any,
    model_type: Type[Model],
) -> Dict[str, Model]:
    """Parse `config` as a mapping from strings to a given `Model` type.

    Args:
        config: The configuration data to check
        model_type: The BaseModel to validate and parse against.

    Returns:
        Fully validated and parsed Dict[str, Model].

    Raises:
        ConfigError, if given improper input.
    """
    try:
        # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because
        # `model_type` is a runtime variable. Pydantic is fine with this.
        instances = parse_obj_as(Dict[str, model_type], config)  # type: ignore[valid-type]
    except ValidationError as e:
        raise ConfigError(str(e)) from e
    return instances

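As a quick illustration of how parse_and_validate_mapping is meant to be used, a sketch with a made-up pydantic model (not the real worker config):

# --- illustrative sketch, not part of the diff ---
from pydantic import BaseModel, StrictInt, StrictStr

class Endpoint(BaseModel):
    """Hypothetical model: one named endpoint in a config mapping."""
    host: StrictStr
    port: StrictInt

raw = {"main": {"host": "localhost", "port": 8080}}
endpoints = parse_and_validate_mapping(raw, Endpoint)
assert endpoints["main"].port == 8080

# Invalid input (e.g. a string port with StrictInt) raises ConfigError
# rather than a bare pydantic ValidationError:
# parse_and_validate_mapping({"main": {"host": "x", "port": "nope"}}, Endpoint)
# --- end sketch ---
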
@@ -33,6 +33,16 @@ class AppServiceConfig(Config):

    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        self.app_service_config_files = config.get("app_service_config_files", [])
        if not isinstance(self.app_service_config_files, list) or not all(
            type(x) is str for x in self.app_service_config_files
        ):
            # type-ignore: this function gets arbitrary json value; we do use this path.
            raise ConfigError(
                "Expected '%s' to be a list of AS config files:"
                % (self.app_service_config_files),
                "app_service_config_files",
            )

        self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)


@@ -40,10 +50,6 @@ def load_appservices(
    hostname: str, config_files: List[str]
) -> List[ApplicationService]:
    """Returns a list of Application Services from the config files."""
    if not isinstance(config_files, list):
        # type-ignore: this function gets arbitrary json value; we do use this path.
        logger.warning("Expected %s to be a list of AS config files.", config_files)  # type: ignore[unreachable]
        return []

    # Dicts of value -> filename
    seen_as_tokens: Dict[str, str] = {}

@@ -74,6 +74,16 @@ class ExperimentalConfig(Config):
            "msc3202_transaction_extensions", False
        )

        # MSC3983: Proxying OTK claim requests to exclusive ASes.
        self.msc3983_appservice_otk_claims: bool = experimental.get(
            "msc3983_appservice_otk_claims", False
        )

        # MSC3984: Proxying key queries to exclusive ASes.
        self.msc3984_appservice_key_query: bool = experimental.get(
            "msc3984_appservice_key_query", False
        )

        # MSC3706 (server-side support for partial state in /send_join responses)
        # Synapse will always serve partial state responses to requests using the stable
        # query parameter `omit_members`. If this flag is set, Synapse will also serve
@@ -178,3 +188,19 @@ class ExperimentalConfig(Config):

        # MSC3967: Do not require UIA when first uploading cross signing keys
        self.msc3967_enabled = experimental.get("msc3967_enabled", False)

        # MSC3981: Recurse relations
        self.msc3981_recurse_relations = experimental.get(
            "msc3981_recurse_relations", False
        )

        # MSC3970: Scope transaction IDs to devices
        self.msc3970_enabled = experimental.get("msc3970_enabled", False)

        # MSC4009: E.164 Matrix IDs
        self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)

        # MSC4010: Do not allow setting m.push_rules account data.
        self.msc4010_push_rules_account_data = experimental.get(
            "msc4010_push_rules_account_data", False
        )

@@ -136,6 +136,7 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
            "type": "array",
            "items": SsoAttributeRequirement.JSON_SCHEMA,
        },
        "enable_registration": {"type": "boolean"},
    },
}

@@ -306,6 +307,7 @@ def _parse_oidc_config_dict(
        user_mapping_provider_class=user_mapping_provider_class,
        user_mapping_provider_config=user_mapping_provider_config,
        attribute_requirements=attribute_requirements,
        enable_registration=oidc_config.get("enable_registration", True),
    )


@@ -405,3 +407,6 @@ class OidcProviderConfig:

    # required attributes to require in userinfo to allow login/registration
    attribute_requirements: List[SsoAttributeRequirement]

    # Whether automatic registrations are enabled in the OIDC flow. Defaults to True
    enable_registration: bool

@@ -42,11 +42,17 @@ class PushConfig(Config):

        # Now check for the one in the 'email' section and honour it,
        # with a warning.
        push_config = config.get("email") or {}
        redact_content = push_config.get("redact_content")
        email_push_config = config.get("email") or {}
        redact_content = email_push_config.get("redact_content")
        if redact_content is not None:
            print(
                "The 'email.redact_content' option is deprecated: "
                "please set push.include_content instead"
            )
            self.push_include_content = not redact_content

        # Whether to apply a random delay to outbound push.
        self.push_jitter_delay_ms = None
        push_jitter_delay = push_config.get("jitter_delay", None)
        if push_jitter_delay:
            self.push_jitter_delay_ms = self.parse_duration(push_jitter_delay)

@@ -35,3 +35,9 @@ class RedisConfig(Config):
        self.redis_port = redis_config.get("port", 6379)
        self.redis_dbid = redis_config.get("dbid", None)
        self.redis_password = redis_config.get("password")

        self.redis_use_tls = redis_config.get("use_tls", False)
        self.redis_certificate = redis_config.get("certificate_file", None)
        self.redis_private_key = redis_config.get("private_key_file", None)
        self.redis_ca_file = redis_config.get("ca_file", None)
        self.redis_ca_path = redis_config.get("ca_path", None)

@@ -137,6 +137,10 @@ class ContentRepositoryConfig(Config):
        self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))

        self.prevent_media_downloads_from = config.get(
            "prevent_media_downloads_from", []
        )

        self.media_store_path = self.ensure_directory(
            config.get("media_store_path", "media_store")
        )

@@ -75,3 +75,7 @@ class RoomConfig(Config):
                    % preset
                )
        # We validate the actual overrides when we try to apply them.

        # When enabled, users will forget rooms when they leave them, either via a
        # leave, kick or ban.
        self.forget_on_leave = config.get("forget_rooms_on_leave", False)

@@ -214,17 +214,52 @@ class HttpListenerConfig:


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ListenerConfig:
    """Object describing the configuration of a single listener."""
class TCPListenerConfig:
    """Object describing the configuration of a single TCP listener."""

    port: int = attr.ib(validator=attr.validators.instance_of(int))
    bind_addresses: List[str]
    bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List))
    type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
    tls: bool = False

    # http_options is only populated if type=http
    http_options: Optional[HttpListenerConfig] = None

    def get_site_tag(self) -> str:
        """Retrieves http_options.tag if it exists, otherwise the port number."""
        if self.http_options and self.http_options.tag is not None:
            return self.http_options.tag
        else:
            return str(self.port)

    def is_tls(self) -> bool:
        return self.tls


@attr.s(slots=True, frozen=True, auto_attribs=True)
class UnixListenerConfig:
    """Object describing the configuration of a single Unix socket listener."""

    # Note: unix sockets can not be tls encrypted, so HAVE to be behind a tls-handling
    # reverse proxy
    path: str = attr.ib()
    # A default (0o666) for this is set in parse_listener_def() below
    mode: int
    type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))

    # http_options is only populated if type=http
    http_options: Optional[HttpListenerConfig] = None

    def get_site_tag(self) -> str:
        return "unix"

    def is_tls(self) -> bool:
        """Unix sockets can't have TLS"""
        return False


ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig]


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:

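Because ListenerConfig is now a Union, call sites either stick to the shared get_site_tag()/is_tls() interface or narrow the type with isinstance() before touching TCP-only fields, as the listen_http and manhole/metrics changes above do. A minimal sketch of the pattern:

# --- illustrative sketch, not part of the diff ---
from typing import Union

def describe(listener: Union[TCPListenerConfig, UnixListenerConfig]) -> str:
    # Shared interface: works for both variants.
    tag = listener.get_site_tag()
    scheme = "https" if listener.is_tls() else "http"
    # isinstance() narrows the union before using TCP-only attributes.
    if isinstance(listener, TCPListenerConfig):
        return f"{scheme} listener '{tag}' on port {listener.port}"
    return f"{scheme} listener '{tag}' on unix socket {listener.path}"
# --- end sketch ---
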
@@ -531,12 +566,12 @@ class ServerConfig(Config):

        self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]

        # no_tls is not really supported any more, but let's grandfather it in
        # here.
        # no_tls is not really supported anymore, but let's grandfather it in here.
        if config.get("no_tls", False):
            l2 = []
            for listener in self.listeners:
                if listener.tls:
                if isinstance(listener, TCPListenerConfig) and listener.tls:
                    # Use isinstance() as the assertion this *has* a listener.port
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.port,
@@ -577,7 +612,7 @@ class ServerConfig(Config):
            )

            self.listeners.append(
                ListenerConfig(
                TCPListenerConfig(
                    port=bind_port,
                    bind_addresses=[bind_host],
                    tls=True,
@@ -589,7 +624,7 @@ class ServerConfig(Config):
            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append(
                    ListenerConfig(
                    TCPListenerConfig(
                        port=unsecure_port,
                        bind_addresses=[bind_host],
                        tls=False,
@@ -601,7 +636,7 @@ class ServerConfig(Config):
        manhole = config.get("manhole")
        if manhole:
            self.listeners.append(
                ListenerConfig(
                TCPListenerConfig(
                    port=manhole,
                    bind_addresses=["127.0.0.1"],
                    type="manhole",
@@ -648,7 +683,7 @@ class ServerConfig(Config):
            logger.warning(METRICS_PORT_WARNING)

            self.listeners.append(
                ListenerConfig(
                TCPListenerConfig(
                    port=metrics_port,
                    bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
                    type="http",
@@ -724,7 +759,7 @@ class ServerConfig(Config):
        self.delete_stale_devices_after = None

    def has_tls_listener(self) -> bool:
        return any(listener.tls for listener in self.listeners)
        return any(listener.is_tls() for listener in self.listeners)

    def generate_config_section(
        self,
@@ -904,25 +939,25 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
        raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))

    port = listener.get("port")
    if type(port) is not int:
    socket_path = listener.get("path")
    # Either a port or a path should be declared at a minimum. Using both would be bad.
    if port is not None and not isinstance(port, int):
        raise ConfigError("Listener configuration is lacking a valid 'port' option")
    if socket_path is not None and not isinstance(socket_path, str):
        raise ConfigError("Listener configuration is lacking a valid 'path' option")
    if port and socket_path:
        raise ConfigError(
            "Can not have both a UNIX socket and an IP/port declared for the same "
            "resource!"
        )
    if port is None and socket_path is None:
        raise ConfigError(
            "Must have either a UNIX socket or an IP/port declared for a given "
            "resource!"
        )

    tls = listener.get("tls", False)

    bind_addresses = listener.get("bind_addresses", [])
    bind_address = listener.get("bind_address")
    # if bind_address was specified, add it to the list of addresses
    if bind_address:
        bind_addresses.append(bind_address)

    # if we still have an empty list of addresses, use the default list
    if not bind_addresses:
        if listener_type == "metrics":
            # the metrics listener doesn't support IPv6
            bind_addresses.append("0.0.0.0")
        else:
            bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

    http_config = None
    if listener_type == "http":
        try:
@@ -932,8 +967,12 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
        except ValueError as e:
            raise ConfigError("Unknown listener resource") from e

        # For a unix socket, default x_forwarded to True, as this is the only way of
        # getting a client IP.
        # Note: a reverse proxy is required anyway, as there is no way of exposing a
        # unix socket to the internet.
        http_config = HttpListenerConfig(
            x_forwarded=listener.get("x_forwarded", False),
            x_forwarded=listener.get("x_forwarded", (True if socket_path else False)),
            resources=resources,
            additional_resources=listener.get("additional_resources", {}),
            tag=listener.get("tag"),
@@ -941,7 +980,30 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
            experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
        )

    return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
    if socket_path:
        # TODO: Add in path validation, like if the directory exists and is writable?
        # Set a default for the permission, in case it's left out
        socket_mode = listener.get("mode", 0o666)

        return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config)

    else:
        assert port is not None
        bind_addresses = listener.get("bind_addresses", [])
        bind_address = listener.get("bind_address")
        # if bind_address was specified, add it to the list of addresses
        if bind_address:
            bind_addresses.append(bind_address)

        # if we still have an empty list of addresses, use the default list
        if not bind_addresses:
            if listener_type == "metrics":
                # the metrics listener doesn't support IPv6
                bind_addresses.append("0.0.0.0")
            else:
                bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

        return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config)


_MANHOLE_SETTINGS_SCHEMA = {

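To see both branches of parse_listener_def in action, a hedged sketch feeding it the dict form of two listener configs (the port, path, and resource values are illustrative):

# --- illustrative sketch, not part of the diff ---
# A TCP listener: 'port' (and optionally 'bind_addresses'/'tls') is given.
tcp = parse_listener_def(
    0, {"port": 8008, "type": "http", "resources": [{"names": ["client"]}]}
)
assert isinstance(tcp, TCPListenerConfig) and not tcp.is_tls()

# A UNIX socket listener: 'path' replaces 'port'; 'mode' defaults to 0o666
# and x_forwarded defaults to True, since a reverse proxy must sit in front.
unix = parse_listener_def(
    1,
    {
        "path": "/run/synapse/client.sock",
        "type": "http",
        "resources": [{"names": ["client"]}],
    },
)
assert isinstance(unix, UnixListenerConfig) and unix.mode == 0o666
assert unix.http_options is not None and unix.http_options.x_forwarded
# --- end sketch ---
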
@@ -18,22 +18,40 @@ import logging
from typing import Any, Dict, List, Union

import attr
from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr

from synapse.types import JsonDict

from ._base import (
from synapse.config._base import (
    Config,
    ConfigError,
    RoutableShardedWorkerHandlingConfig,
    ShardedWorkerHandlingConfig,
)
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
from synapse.config._util import parse_and_validate_mapping
from synapse.config.server import (
    DIRECT_TCP_ERROR,
    TCPListenerConfig,
    parse_listener_def,
)
from synapse.types import JsonDict

_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
"""

_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration, or optionally (as a deprecated
solution) in every worker's yaml as various `worker_replication_*` settings as defined
in workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""
# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
# Use this to adjust what the main process is known as in the yaml instance_map
MAIN_PROCESS_INSTANCE_MAP_NAME = "main"

logger = logging.getLogger(__name__)


@@ -47,13 +65,43 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
    return obj


@attr.s(auto_attribs=True)
class InstanceLocationConfig:
class ConfigModel(BaseModel):
    """A custom version of Pydantic's BaseModel which

    - ignores unknown fields and
    - does not allow fields to be overwritten after construction,

    but otherwise uses Pydantic's default behaviour.

    For now, ignore unknown fields. In the future, we could change this so that unknown
    config values cause a ValidationError, provided the error messages are meaningful to
    server operators.

    Subclassing in this way is recommended by
    https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
    """

    class Config:
        # By default, ignore fields that we don't recognise.
        extra = Extra.ignore
        # By default, don't allow fields to be reassigned after parsing.
        allow_mutation = False


class InstanceLocationConfig(ConfigModel):
    """The host and port to talk to an instance via HTTP replication."""

    host: str
    port: int
    tls: bool = False
    host: StrictStr
    port: StrictInt
    tls: StrictBool = False

    def scheme(self) -> str:
        """Hardcode a retrievable scheme based on self.tls"""
        return "https" if self.tls else "http"

    def netloc(self) -> str:
        """Nicely format the network location data"""
        return f"{self.host}:{self.port}"


@attr.s
@@ -126,27 +174,15 @@ class WorkerConfig(Config):
            raise ConfigError("worker_log_config must be a string")
        self.worker_log_config = worker_log_config

        # The host used to connect to the main synapse
        self.worker_replication_host = config.get("worker_replication_host", None)

        # The port on the main synapse for TCP replication
        if "worker_replication_port" in config:
            raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))

        # The port on the main synapse for HTTP replication endpoint
        self.worker_replication_http_port = config.get("worker_replication_http_port")

        # The tls mode on the main synapse for HTTP replication endpoint.
        # For backward compatibility this defaults to False.
        self.worker_replication_http_tls = config.get(
            "worker_replication_http_tls", False
        )

        # The shared secret used for authentication when connecting to the main synapse.
        self.worker_replication_secret = config.get("worker_replication_secret", None)

        self.worker_name = config.get("worker_name", self.worker_app)
        self.instance_name = self.worker_name or "master"
        self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME

        # FIXME: Remove this check after a suitable amount of time.
        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
@@ -161,7 +197,7 @@ class WorkerConfig(Config):
        manhole = config.get("worker_manhole")
        if manhole:
            self.worker_listeners.append(
                ListenerConfig(
                TCPListenerConfig(
                    port=manhole,
                    bind_addresses=["127.0.0.1"],
                    type="manhole",
@@ -180,10 +216,55 @@ class WorkerConfig(Config):
            )

        # A map from instance name to host/port of their HTTP replication endpoint.
        instance_map = config.get("instance_map") or {}
        self.instance_map = {
            name: InstanceLocationConfig(**c) for name, c in instance_map.items()
        }
        # Check if the main process is declared. Inject it into the map if it's not,
        # based first on if a 'main' block is declared then on 'worker_replication_*'
        # data. If both are available, default to instance_map. The main process
        # itself doesn't need this data as it would never have to talk to itself.
        instance_map: Dict[str, Any] = config.get("instance_map", {})

        if instance_map and self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
            # The host used to connect to the main synapse
            main_host = config.get("worker_replication_host", None)

            # The port on the main synapse for HTTP replication endpoint
            main_port = config.get("worker_replication_http_port")

            # The tls mode on the main synapse for HTTP replication endpoint.
            # For backward compatibility this defaults to False.
            main_tls = config.get("worker_replication_http_tls", False)

            # For now, accept 'main' in the instance_map, but the replication system
            # expects 'master', force that into being until it's changed later.
            if MAIN_PROCESS_INSTANCE_MAP_NAME in instance_map:
                instance_map[MAIN_PROCESS_INSTANCE_NAME] = instance_map[
                    MAIN_PROCESS_INSTANCE_MAP_NAME
                ]
                del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]

            # This is the backwards compatibility bit that handles the
            # worker_replication_* bits using setdefault() to not overwrite anything.
            elif main_host is not None and main_port is not None:
                instance_map.setdefault(
                    MAIN_PROCESS_INSTANCE_NAME,
                    {
                        "host": main_host,
                        "port": main_port,
                        "tls": main_tls,
                    },
                )

            else:
                # If we've gotten here, it means that the main process is not on the
                # instance_map and that not enough worker_replication_* variables
                # were declared in the worker's yaml.
                raise ConfigError(
                    _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
                    % MAIN_PROCESS_INSTANCE_MAP_NAME
                )

        self.instance_map: Dict[
            str, InstanceLocationConfig
        ] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)

        # Map from type of streams to source, c.f. WriterLocations.
        writers = config.get("stream_writers") or {}

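A hedged sketch of the instance_map flow above, using an illustrative shared-config fragment (the worker names and ports are made up):

# --- illustrative sketch, not part of the diff ---
# The dict form the YAML would produce:
instance_map = {
    "main": {"host": "localhost", "port": 8080},  # the main process
    "event_persister1": {"host": "localhost", "port": 8034, "tls": False},
}

# The replication system still expects the key "master", so 'main' is
# renamed before validation, mirroring the logic above.
instance_map["master"] = instance_map.pop("main")

locations = parse_and_validate_mapping(instance_map, InstanceLocationConfig)
assert locations["master"].netloc() == "localhost:8080"
assert locations["master"].scheme() == "http"  # tls defaults to False
# --- end sketch ---
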
@@ -51,7 +51,7 @@ def check_event_content_hash(
    # some malformed events lack a 'hashes'. Protect against it being missing
    # or a weird type by basically treating it the same as an unhashed event.
    hashes = event.get("hashes")
    # nb it might be a frozendict or a dict
    # nb it might be an immutabledict or a dict
    if not isinstance(hashes, collections.abc.Mapping):
        raise SynapseError(
            400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED

@@ -150,18 +150,19 @@ class Keyring:
    def __init__(
        self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
    ):
        self.clock = hs.get_clock()

        if key_fetchers is None:
            key_fetchers = (
                # Fetch keys from the database.
                StoreKeyFetcher(hs),
                # Fetch keys from a configured Perspectives server.
                PerspectivesKeyFetcher(hs),
                # Fetch keys from the origin server directly.
                ServerKeyFetcher(hs),
            )
        self._key_fetchers = key_fetchers
            # Always fetch keys from the database.
            mutable_key_fetchers: List[KeyFetcher] = [StoreKeyFetcher(hs)]
            # Fetch keys from configured trusted key servers, if any exist.
            key_servers = hs.config.key.key_servers
            if key_servers:
                mutable_key_fetchers.append(PerspectivesKeyFetcher(hs))
            # Finally, fetch keys from the origin server directly.
            mutable_key_fetchers.append(ServerKeyFetcher(hs))

            self._key_fetchers: Iterable[KeyFetcher] = tuple(mutable_key_fetchers)
        else:
            self._key_fetchers = key_fetchers

        self._fetch_keys_queue: BatchingQueue[
            _FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
@@ -172,7 +173,7 @@ class Keyring:
            process_batch_callback=self._inner_fetch_key_requests,
        )

        self._hostname = hs.hostname
        self._is_mine_server_name = hs.is_mine_server_name

        # build a FetchKeyResult for each of our own keys, to shortcircuit the
        # fetcher.
@@ -276,7 +277,7 @@ class Keyring:

        # If we are the originating server, short-circuit the key-fetch for any keys
        # we already have
        if verify_request.server_name == self._hostname:
        if self._is_mine_server_name(verify_request.server_name):
            for key_id in verify_request.key_ids:
                if key_id in self._local_verify_keys:
                    found_keys[key_id] = self._local_verify_keys[key_id]
@@ -510,7 +511,7 @@ class StoreKeyFetcher(KeyFetcher):
            for key_id in queue_value.key_ids
        )

        res = await self.store.get_server_verify_keys(key_ids_to_fetch)
        res = await self.store.get_server_keys_json(key_ids_to_fetch)
        keys: Dict[str, Dict[str, FetchKeyResult]] = {}
        for (server_name, key_id), key in res.items():
            keys.setdefault(server_name, {})[key_id] = key
@@ -522,7 +523,6 @@ class BaseV2KeyFetcher(KeyFetcher):
        super().__init__(hs)

        self.store = hs.get_datastores().main
        self.config = hs.config

    async def process_v2_response(
        self, from_server: str, response_json: JsonDict, time_added_ms: int
@@ -626,7 +626,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
        super().__init__(hs)
        self.clock = hs.get_clock()
        self.client = hs.get_federation_http_client()
        self.key_servers = self.config.key.key_servers
        self.key_servers = hs.config.key.key_servers

    async def _fetch_keys(
        self, keys_to_fetch: List[_FetchKeyRequest]
@@ -721,7 +721,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
        )

        keys: Dict[str, Dict[str, FetchKeyResult]] = {}
        added_keys: List[Tuple[str, str, FetchKeyResult]] = []
        added_keys: Dict[Tuple[str, str], FetchKeyResult] = {}

        time_now_ms = self.clock.time_msec()

@@ -752,12 +752,30 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
                # we continue to process the rest of the response
                continue

            added_keys.extend(
                (server_name, key_id, key) for key_id, key in processed_response.items()
            )
            for key_id, key in processed_response.items():
                dict_key = (server_name, key_id)
                if dict_key in added_keys:
                    already_present_key = added_keys[dict_key]
                    logger.warning(
                        "Duplicate server keys for %s (%s) from perspective %s (%r, %r)",
                        server_name,
                        key_id,
                        perspective_name,
                        already_present_key,
                        key,
                    )

                    if already_present_key.valid_until_ts > key.valid_until_ts:
                        # Favour the entry with the largest valid_until_ts,
                        # as `old_verify_keys` are also collected from this
                        # response.
                        continue

                added_keys[dict_key] = key

            keys.setdefault(server_name, {}).update(processed_response)

        await self.store.store_server_verify_keys(
        await self.store.store_server_signature_keys(
            perspective_name, time_now_ms, added_keys
        )

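A tiny worked example of the dedup rule above, with hypothetical timestamps: if a perspectives response yields the same (server, key_id) twice, the entry with the larger valid_until_ts wins, since old_verify_keys from the same response may carry an expired copy.

# --- illustrative sketch, not part of the diff ---
# Sketch of the favouring logic, using plain ints in place of FetchKeyResult.
added_keys: dict = {}
responses = [
    (("example.org", "ed25519:abc"), 1_000),  # e.g. from old_verify_keys
    (("example.org", "ed25519:abc"), 2_000),  # the current key
]
for dict_key, valid_until_ts in responses:
    if dict_key in added_keys and added_keys[dict_key] > valid_until_ts:
        continue  # keep the longer-lived entry
    added_keys[dict_key] = valid_until_ts

assert added_keys[("example.org", "ed25519:abc")] == 2_000
# --- end sketch ---
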
@@ -455,8 +455,11 @@ def _check_create(event: "EventBase") -> None:
|
||||
"room appears to have unsupported version %s" % (room_version_prop,),
|
||||
)
|
||||
|
||||
# 1.4 If content has no creator field, reject.
|
||||
if EventContentFields.ROOM_CREATOR not in event.content:
|
||||
# 1.4 If content has no creator field, reject if the room version requires it.
|
||||
if (
|
||||
not event.room_version.msc2175_implicit_room_creator
|
||||
and EventContentFields.ROOM_CREATOR not in event.content
|
||||
):
|
||||
raise AuthError(403, "Create event lacks a 'creator' property")
|
||||
|
||||
|
||||
@@ -491,7 +494,11 @@ def _is_membership_change_allowed(
|
||||
key = (EventTypes.Create, "")
|
||||
create = auth_events.get(key)
|
||||
if create and event.prev_event_ids()[0] == create.event_id:
|
||||
if create.content["creator"] == event.state_key:
|
||||
if room_version.msc2175_implicit_room_creator:
|
||||
creator = create.sender
|
||||
else:
|
||||
creator = create.content[EventContentFields.ROOM_CREATOR]
|
||||
if creator == event.state_key:
|
||||
return
|
||||
|
||||
target_user_id = event.state_key
|
||||
@@ -786,7 +793,7 @@ def check_redaction(
|
||||
"""Check whether the event sender is allowed to redact the target event.
|
||||
|
||||
Returns:
|
||||
True if the the sender is allowed to redact the target event if the
|
||||
True if the sender is allowed to redact the target event if the
|
||||
target event was created by them.
|
||||
False if the sender is allowed to redact the target event with no
|
||||
further checks.
|
||||
@@ -1004,10 +1011,14 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
|
||||
# that.
|
||||
key = (EventTypes.Create, "")
|
||||
create_event = auth_events.get(key)
|
||||
if create_event is not None and create_event.content["creator"] == user_id:
|
||||
return 100
|
||||
else:
|
||||
return 0
|
||||
if create_event is not None:
|
||||
if create_event.room_version.msc2175_implicit_room_creator:
|
||||
creator = create_event.sender
|
||||
else:
|
||||
creator = create_event.content[EventContentFields.ROOM_CREATOR]
|
||||
if creator == user_id:
|
||||
return 100
|
||||
return 0
|
||||
|
||||
|
||||
def get_named_level(auth_events: StateMap["EventBase"], name: str, default: int) -> int:
|
||||
@@ -1043,10 +1054,15 @@ def _verify_third_party_invite(
     """
     if "third_party_invite" not in event.content:
         return False
-    if "signed" not in event.content["third_party_invite"]:
+    third_party_invite = event.content["third_party_invite"]
+    if not isinstance(third_party_invite, collections.abc.Mapping):
         return False
-    signed = event.content["third_party_invite"]["signed"]
-    for key in {"mxid", "token"}:
+    if "signed" not in third_party_invite:
+        return False
+    signed = third_party_invite["signed"]
+    if not isinstance(signed, collections.abc.Mapping):
+        return False
+    for key in {"mxid", "token", "signatures"}:
         if key not in signed:
             return False
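A small sketch (illustrative helper, not Synapse code) of the defensive pattern this hunk applies: treat untrusted event content as arbitrary JSON and type-check each level before indexing into it.

```python
import collections.abc
from typing import Any, Optional

def extract_signed(content: Any) -> Optional[Any]:
    """Return content["third_party_invite"]["signed"] only if every
    intermediate value is a mapping with the required keys, else None."""
    if not isinstance(content, collections.abc.Mapping):
        return None
    tpi = content.get("third_party_invite")
    if not isinstance(tpi, collections.abc.Mapping):
        return None
    signed = tpi.get("signed")
    if not isinstance(signed, collections.abc.Mapping):
        return None
    if not all(k in signed for k in ("mxid", "token", "signatures")):
        return None
    return signed

assert extract_signed({"third_party_invite": "junk"}) is None
```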
@@ -1064,8 +1080,6 @@ def _verify_third_party_invite(

     if signed["mxid"] != event.state_key:
         return False
-    if signed["token"] != token:
-        return False

     for public_key_object in get_public_keys(invite_event):
         public_key = public_key_object["public_key"]

@@ -1077,7 +1091,9 @@ def _verify_third_party_invite(
                 verify_key = decode_verify_key_bytes(
                     key_name, decode_base64(public_key)
                 )
-                verify_signed_json(signed, server, verify_key)
+                # verify_signed_json incorrectly states it wants a dict, it
+                # just needs a mapping.
+                verify_signed_json(signed, server, verify_key)  # type: ignore[arg-type]

                 # We got the public key from the invite, so we know that the
                 # correct server signed the signed bundle.

@@ -198,10 +198,17 @@ class _EventInternalMetadata:
     soft_failed: DictProperty[bool] = DictProperty("soft_failed")
     proactively_send: DictProperty[bool] = DictProperty("proactively_send")
     redacted: DictProperty[bool] = DictProperty("redacted")
-    txn_id: DictProperty[str] = DictProperty("txn_id")
-    token_id: DictProperty[int] = DictProperty("token_id")
     historical: DictProperty[bool] = DictProperty("historical")

+    txn_id: DictProperty[str] = DictProperty("txn_id")
+    """The transaction ID, if it was set when the event was created."""
+
+    token_id: DictProperty[int] = DictProperty("token_id")
+    """The access token ID of the user who sent this event, if any."""
+
+    device_id: DictProperty[str] = DictProperty("device_id")
+    """The device ID of the user who sent this event, if any."""
+
     # XXX: These are set by StreamWorkerStore._set_before_and_after.
     # I'm pretty sure that these are never persisted to the database, so shouldn't
     # be here

@@ -326,7 +333,6 @@ class EventBase(metaclass=abc.ABCMeta):
     hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
     origin: DictProperty[str] = DictProperty("origin")
     origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
-    redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None)
     room_id: DictProperty[str] = DictProperty("room_id")
     sender: DictProperty[str] = DictProperty("sender")
     # TODO state_key should be Optional[str]. This is generally asserted in Synapse

@@ -346,6 +352,13 @@ class EventBase(metaclass=abc.ABCMeta):
     def membership(self) -> str:
         return self.content["membership"]

+    @property
+    def redacts(self) -> Optional[str]:
+        """MSC2176 moved the redacts field into the content."""
+        if self.room_version.msc2176_redaction_rules:
+            return self.content.get("redacts")
+        return self.get("redacts")
+
     def is_state(self) -> bool:
         return self.get_state_key() is not None
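For clarity, a hedged usage sketch of the new `redacts` property (toy stand-in class, not Synapse's real `EventBase`): in MSC2176 room versions the field lives in the content, elsewhere at the event's top level.

```python
from typing import Dict, Optional

class _Event:  # toy stand-in for EventBase
    def __init__(self, top_level: Optional[str], content: Dict, msc2176: bool) -> None:
        self._dict = {"redacts": top_level} if top_level is not None else {}
        self.content = content
        self.msc2176 = msc2176

    @property
    def redacts(self) -> Optional[str]:
        # Mirrors the property added above.
        if self.msc2176:
            return self.content.get("redacts")
        return self._dict.get("redacts")

assert _Event("$old", {}, msc2176=False).redacts == "$old"
assert _Event(None, {"redacts": "$new"}, msc2176=True).redacts == "$new"
```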
@@ -462,7 +475,7 @@ class FrozenEvent(EventBase):
         # Signatures is a dict of dicts, and this is faster than doing a
         # copy.deepcopy
         signatures = {
-            name: {sig_id: sig for sig_id, sig in sigs.items()}
+            name: dict(sigs.items())
             for name, sigs in event_dict.pop("signatures", {}).items()
         }

@@ -510,7 +523,7 @@ class FrozenEventV2(EventBase):
         # Signatures is a dict of dicts, and this is faster than doing a
         # copy.deepcopy
         signatures = {
-            name: {sig_id: sig for sig_id, sig in sigs.items()}
+            name: dict(sigs.items())
             for name, sigs in event_dict.pop("signatures", {}).items()
         }


@@ -173,7 +173,9 @@ class EventBuilder:
         if self.is_state():
             event_dict["state_key"] = self._state_key

-        if self._redacts is not None:
+        # MSC2174 moves the redacts property to the content, it is invalid to
+        # provide it as a top-level property.
+        if self._redacts is not None and not self.room_version.msc2176_redaction_rules:
             event_dict["redacts"] = self._redacts

         if self._origin_server_ts is not None:

@@ -15,7 +15,7 @@ from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, List, Optional, Tuple

 import attr
-from frozendict import frozendict
+from immutabledict import immutabledict

 from synapse.appservice import ApplicationService
 from synapse.events import EventBase

@@ -489,4 +489,4 @@ def _decode_state_dict(
     if input is None:
         return None

-    return frozendict({(etype, state_key): v for etype, state_key, v in input})
+    return immutabledict({(etype, state_key): v for etype, state_key, v in input})

@@ -106,7 +106,6 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
         "depth",
         "prev_events",
         "auth_events",
-        "origin",
         "origin_server_ts",
     ]

@@ -114,6 +113,10 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
     if not room_version.msc2176_redaction_rules:
         allowed_keys.extend(["prev_state", "membership"])

+    # Room versions before MSC3989 kept the origin field.
+    if not room_version.msc3989_redaction_rules:
+        allowed_keys.append("origin")
+
     event_type = event_dict["type"]

     new_content = {}

@@ -127,6 +130,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
         add_fields("membership")
         if room_version.msc3375_redaction_rules:
             add_fields(EventContentFields.AUTHORISING_USER)
+        if room_version.msc3821_redaction_rules:
+            # Preserve the signed field under third_party_invite.
+            third_party_invite = event_dict["content"].get("third_party_invite")
+            if isinstance(third_party_invite, collections.abc.Mapping):
+                new_content["third_party_invite"] = {}
+                if "signed" in third_party_invite:
+                    new_content["third_party_invite"]["signed"] = third_party_invite[
+                        "signed"
+                    ]
+
     elif event_type == EventTypes.Create:
         # MSC2176 rules state that create events cannot be redacted.
         if room_version.msc2176_redaction_rules:

@@ -168,6 +181,18 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
     elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
         add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE)

+    # Protect the rel_type and event_id fields under the m.relates_to field.
+    if room_version.msc3389_relation_redactions:
+        relates_to = event_dict["content"].get("m.relates_to")
+        if isinstance(relates_to, collections.abc.Mapping):
+            new_relates_to = {}
+            for field in ("rel_type", "event_id"):
+                if field in relates_to:
+                    new_relates_to[field] = relates_to[field]
+            # Only include a non-empty relates_to field.
+            if new_relates_to:
+                new_content["m.relates_to"] = new_relates_to
+
     allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}

     allowed_fields["content"] = new_content
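A standalone sketch (hypothetical helper name) of the MSC3389 behaviour added above: when redacting, keep only `rel_type` and `event_id` from `m.relates_to`, and drop the key entirely if nothing survives.

```python
import collections.abc
from typing import Any, Dict

def prune_relates_to(content: Dict[str, Any]) -> Dict[str, Any]:
    """Return the redacted m.relates_to portion of an event's content."""
    pruned: Dict[str, Any] = {}
    relates_to = content.get("m.relates_to")
    if isinstance(relates_to, collections.abc.Mapping):
        kept = {f: relates_to[f] for f in ("rel_type", "event_id") if f in relates_to}
        if kept:  # only include a non-empty relates_to field
            pruned["m.relates_to"] = kept
    return pruned

assert prune_relates_to(
    {"m.relates_to": {"rel_type": "m.thread", "event_id": "$e", "key": "x"}}
) == {"m.relates_to": {"rel_type": "m.thread", "event_id": "$e"}}
```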
@@ -336,6 +361,7 @@ def serialize_event(
     time_now_ms: int,
     *,
     config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
+    msc3970_enabled: bool = False,
 ) -> JsonDict:
     """Serialize event for clients

@@ -343,6 +369,8 @@ def serialize_event(
         e
         time_now_ms
         config: Event serialization config
+        msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should
+            include the `transaction_id` in the event's `unsigned` section.

     Returns:
         The serialized event dictionary.

@@ -355,7 +383,7 @@ def serialize_event(
     time_now_ms = int(time_now_ms)

     # Should this strip out None's?
-    d = {k: v for k, v in e.get_dict().items()}
+    d = dict(e.get_dict().items())

     d["event_id"] = e.event_id

@@ -365,27 +393,43 @@ def serialize_event(

     if "redacted_because" in e.unsigned:
         d["unsigned"]["redacted_because"] = serialize_event(
-            e.unsigned["redacted_because"], time_now_ms, config=config
+            e.unsigned["redacted_because"],
+            time_now_ms,
+            config=config,
+            msc3970_enabled=msc3970_enabled,
         )

     # If we have a txn_id saved in the internal_metadata, we should include it in the
     # unsigned section of the event if it was sent by the same session as the one
     # requesting the event.
-    # There is a special case for guests, because they only have one access token
-    # without associated access_token_id, so we always include the txn_id for events
-    # they sent.
-    txn_id = getattr(e.internal_metadata, "txn_id", None)
+    txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
     if txn_id is not None and config.requester is not None:
-        event_token_id = getattr(e.internal_metadata, "token_id", None)
-        if config.requester.user.to_string() == e.sender and (
-            (
-                event_token_id is not None
-                and config.requester.access_token_id is not None
-                and event_token_id == config.requester.access_token_id
+        # For the MSC3970 rules to be applied, we *need* to have the device ID in the
+        # event internal metadata. Since we were not recording them before, if it hasn't
+        # been recorded, we fallback to the old behaviour.
+        event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
+        if msc3970_enabled and event_device_id is not None:
+            if event_device_id == config.requester.device_id:
+                d["unsigned"]["transaction_id"] = txn_id

+        else:
+            # The pre-MSC3970 behaviour is to only include the transaction ID if the
+            # event was sent from the same access token. For regular users, we can use
+            # the access token ID to determine this. For guests, we can't, but since
+            # each guest only has one access token, we can just check that the event was
+            # sent by the same user as the one requesting the event.
+            event_token_id: Optional[int] = getattr(
+                e.internal_metadata, "token_id", None
             )
-            or config.requester.is_guest
-        ):
-            d["unsigned"]["transaction_id"] = txn_id
+            if config.requester.user.to_string() == e.sender and (
+                (
+                    event_token_id is not None
+                    and config.requester.access_token_id is not None
+                    and event_token_id == config.requester.access_token_id
+                )
+                or config.requester.is_guest
+            ):
+                d["unsigned"]["transaction_id"] = txn_id

     # invite_room_state and knock_room_state are a list of stripped room state events
     # that are meant to provide metadata about a room to an invitee/knocker. They are
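A condensed sketch of the decision the hunk above implements (plain values standing in for Synapse's requester and metadata objects): prefer the MSC3970 device-ID match when available, otherwise fall back to the access-token comparison.

```python
from typing import Optional

def include_transaction_id(
    msc3970_enabled: bool,
    requester_device_id: Optional[str],
    event_device_id: Optional[str],
    requester_token_id: Optional[int],
    event_token_id: Optional[int],
    same_sender: bool,
    is_guest: bool,
) -> bool:
    if msc3970_enabled and event_device_id is not None:
        # MSC3970: same device implies same session.
        return event_device_id == requester_device_id
    # Pre-MSC3970: same access token, or a guest (one token per guest).
    return same_sender and (
        (
            event_token_id is not None
            and requester_token_id is not None
            and event_token_id == requester_token_id
        )
        or is_guest
    )
```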
@@ -416,6 +460,9 @@ class EventClientSerializer:
     clients.
     """

+    def __init__(self, *, msc3970_enabled: bool = False):
+        self._msc3970_enabled = msc3970_enabled
+
     def serialize_event(
         self,
         event: Union[JsonDict, EventBase],

@@ -440,7 +487,9 @@ class EventClientSerializer:
         if not isinstance(event, EventBase):
             return event

-        serialized_event = serialize_event(event, time_now, config=config)
+        serialized_event = serialize_event(
+            event, time_now, config=config, msc3970_enabled=self._msc3970_enabled
+        )

         # Check if there are any bundled aggregations to include with the event.
         if bundle_aggregations:

@@ -498,7 +547,9 @@ class EventClientSerializer:
             # `sender` of the edit; however MSC3925 proposes extending it to the whole
             # of the edit, which is what we do here.
             serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event(
-                event_aggregations.replace, time_now, config=config
+                event_aggregations.replace,
+                time_now,
+                config=config,
             )

             # Include any threaded replies to this event.

@@ -567,7 +618,7 @@ PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
 def copy_and_fixup_power_levels_contents(
     old_power_levels: PowerLevelsContent,
 ) -> Dict[str, Union[int, Dict[str, int]]]:
-    """Copy the content of a power_levels event, unfreezing frozendicts along the way.
+    """Copy the content of a power_levels event, unfreezing immutabledicts along the way.

     We accept as input power level values which are strings, provided they represent an
     integer, e.g. `"100"` instead of 100. Such strings are converted to integers

@@ -12,11 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import collections.abc
-from typing import Iterable, Type, Union, cast
+from typing import Iterable, List, Type, Union, cast

 import jsonschema
+from pydantic import Field, StrictBool, StrictStr

-from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
+from synapse.api.constants import (
+    MAX_ALIAS_LENGTH,
+    EventContentFields,
+    EventTypes,
+    Membership,
+)
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import EventFormatVersions
 from synapse.config.homeserver import HomeServerConfig

@@ -28,6 +34,8 @@ from synapse.events.utils import (
     validate_canonicaljson,
 )
 from synapse.federation.federation_server import server_matches_acl_event
+from synapse.http.servlet import validate_json_object
+from synapse.rest.models import RequestBodyModel
 from synapse.types import EventID, JsonDict, RoomID, UserID


@@ -88,27 +96,27 @@ class EventValidator:
                         Codes.INVALID_PARAM,
                     )

-        if event.type == EventTypes.Retention:
+        elif event.type == EventTypes.Retention:
             self._validate_retention(event)

-        if event.type == EventTypes.ServerACL:
+        elif event.type == EventTypes.ServerACL:
             if not server_matches_acl_event(config.server.server_name, event):
                 raise SynapseError(
                     400, "Can't create an ACL event that denies the local server"
                 )

-        if event.type == EventTypes.PowerLevels:
+        elif event.type == EventTypes.PowerLevels:
             try:
                 jsonschema.validate(
                     instance=event.content,
                     schema=POWER_LEVELS_SCHEMA,
-                    cls=plValidator,
+                    cls=POWER_LEVELS_VALIDATOR,
                 )
             except jsonschema.ValidationError as e:
                 if e.path:
                     # example: "users_default": '0' is not of type 'integer'
                     # cast safety: path entries can be integers, if we fail to validate
-                    # items in an array. However the POWER_LEVELS_SCHEMA doesn't expect
+                    # items in an array. However, the POWER_LEVELS_SCHEMA doesn't expect
                     # to see any arrays.
                     message = (
                         '"' + cast(str, e.path[-1]) + '": ' + e.message  # noqa: B306

@@ -125,6 +133,15 @@ class EventValidator:
                         errcode=Codes.BAD_JSON,
                     )

+        # If the event contains a mentions key, validate it.
+        if (
+            EventContentFields.MSC3952_MENTIONS in event.content
+            and config.experimental.msc3952_intentional_mentions
+        ):
+            validate_json_object(
+                event.content[EventContentFields.MSC3952_MENTIONS], Mentions
+            )
+
     def _validate_retention(self, event: EventBase) -> None:
         """Checks that an event that defines the retention policy for a room respects the
         format enforced by the spec.

@@ -253,12 +270,17 @@ POWER_LEVELS_SCHEMA = {
 }


+class Mentions(RequestBodyModel):
+    user_ids: List[StrictStr] = Field(default_factory=list)
+    room: StrictBool = False
+
+
 # This could return something newer than Draft 7, but that's the current "latest"
 # validator.
-def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
-    validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
+def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]:
+    validator = jsonschema.validators.validator_for(schema)

-    # by default jsonschema does not consider a frozendict to be an object so
+    # by default jsonschema does not consider an immutabledict to be an object so
     # we need to use a custom type checker
     # https://python-jsonschema.readthedocs.io/en/stable/validate/?highlight=object#validating-with-additional-types
     type_checker = validator.TYPE_CHECKER.redefine(

@@ -268,4 +290,4 @@ def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
     return jsonschema.validators.extend(validator, type_checker=type_checker)


-plValidator = _create_power_level_validator()
+POWER_LEVELS_VALIDATOR = _create_validator(POWER_LEVELS_SCHEMA)
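A hedged, self-contained illustration of the new `Mentions` model being validated (using plain pydantic v1 directly rather than Synapse's `RequestBodyModel`/`validate_json_object` wrappers, which are thin layers on top of it):

```python
from typing import List

from pydantic import BaseModel, Field, StrictBool, StrictStr, ValidationError

class Mentions(BaseModel):  # RequestBodyModel is essentially a BaseModel subclass
    user_ids: List[StrictStr] = Field(default_factory=list)
    room: StrictBool = False

# Well-formed mentions content validates cleanly.
Mentions.parse_obj({"user_ids": ["@alice:example.org"], "room": True})

# A bare string where a list is expected is rejected.
try:
    Mentions.parse_obj({"user_ids": "not-a-list"})
except ValidationError as e:
    print(e)
```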
@@ -49,9 +49,9 @@ class FederationBase:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs

-        self.server_name = hs.hostname
+        self._is_mine_server_name = hs.is_mine_server_name
         self.keyring = hs.get_keyring()
-        self.spam_checker = hs.get_spam_checker()
+        self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
         self.store = hs.get_datastores().main
         self._clock = hs.get_clock()
         self._storage_controllers = hs.get_storage_controllers()
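Throughout this changeset, `destination == self.server_name` comparisons are replaced with calls to `hs.is_mine_server_name(...)`. A minimal sketch of what such a helper plausibly does (illustrative toy class; the real `HomeServer` method may differ):

```python
class HomeServer:  # toy stand-in for synapse.server.HomeServer
    def __init__(self, hostname: str) -> None:
        self.hostname = hostname

    def is_mine_server_name(self, server_name: str) -> bool:
        """True if the given server name refers to this homeserver.

        Centralising the comparison means any future normalisation
        (e.g. handling an explicit port) only changes in one place
        rather than at every call site."""
        return server_name == self.hostname

hs = HomeServer("example.org")
assert hs.is_mine_server_name("example.org")
assert not hs.is_mine_server_name("matrix.org")
```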
@@ -137,9 +137,9 @@ class FederationBase:
             )
             return redacted_event

-        spam_check = await self.spam_checker.check_event_for_spam(pdu)
+        spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu)

-        if spam_check != self.spam_checker.NOT_SPAM:
+        if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
             logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
             log_kv(
                 {


@@ -61,6 +61,7 @@ from synapse.federation.federation_base import (
     event_from_pdu_json,
 )
 from synapse.federation.transport.client import SendJoinResponse
+from synapse.http.client import is_unknown_endpoint
 from synapse.http.types import QueryParams
 from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
 from synapse.types import JsonDict, UserID, get_domain_from_id

@@ -234,7 +235,10 @@ class FederationClient(FederationBase):
         )

     async def claim_client_keys(
-        self, destination: str, content: JsonDict, timeout: Optional[int]
+        self,
+        destination: str,
+        query: Dict[str, Dict[str, Dict[str, int]]],
+        timeout: Optional[int],
     ) -> JsonDict:
         """Claims one-time keys for a device hosted on a remote server.

@@ -246,6 +250,50 @@ class FederationClient(FederationBase):
             The JSON object from the response
         """
         sent_queries_counter.labels("client_one_time_keys").inc()

+        # Convert the query with counts into a stable and unstable query and check
+        # if attempting to claim more than 1 OTK.
+        content: Dict[str, Dict[str, str]] = {}
+        unstable_content: Dict[str, Dict[str, List[str]]] = {}
+        use_unstable = False
+        for user_id, one_time_keys in query.items():
+            for device_id, algorithms in one_time_keys.items():
+                if any(count > 1 for count in algorithms.values()):
+                    use_unstable = True
+                if algorithms:
+                    # For the stable query, choose only the first algorithm.
+                    content.setdefault(user_id, {})[device_id] = next(iter(algorithms))
+                    # For the unstable query, repeat each algorithm by count, then
+                    # splat those into chain to get a flattened list of all algorithms.
+                    #
+                    # Converts from {"algo1": 2, "algo2": 2} to
+                    # ["algo1", "algo1", "algo2", "algo2"].
+                    unstable_content.setdefault(user_id, {})[device_id] = list(
+                        itertools.chain(
+                            *(
+                                itertools.repeat(algorithm, count)
+                                for algorithm, count in algorithms.items()
+                            )
+                        )
+                    )
+
+        if use_unstable:
+            try:
+                return await self.transport_layer.claim_client_keys_unstable(
+                    destination, unstable_content, timeout
+                )
+            except HttpResponseException as e:
+                # If an error is received that is due to an unrecognised endpoint,
+                # fallback to the v1 endpoint. Otherwise, consider it a legitimate error
+                # and raise.
+                if not is_unknown_endpoint(e):
+                    raise

+                logger.debug(
+                    "Couldn't claim client keys with the unstable API, falling back to the v1 API"
+                )
+        else:
+            logger.debug("Skipping unstable claim client keys API")
+
         return await self.transport_layer.claim_client_keys(
             destination, content, timeout
         )
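A worked example of the conversion described in the comment above, runnable on its own (no Synapse imports; names are illustrative):

```python
import itertools
from typing import Dict, List

query: Dict[str, Dict[str, Dict[str, int]]] = {
    "@alice:remote": {"DEV1": {"signed_curve25519": 2, "other_algo": 1}}
}

stable: Dict[str, Dict[str, str]] = {}
unstable: Dict[str, Dict[str, List[str]]] = {}
for user_id, devices in query.items():
    for device_id, algorithms in devices.items():
        # Stable endpoint can only claim one key per device: first algorithm wins.
        stable.setdefault(user_id, {})[device_id] = next(iter(algorithms))
        # Unstable endpoint takes the full multiset, flattened.
        unstable.setdefault(user_id, {})[device_id] = list(
            itertools.chain(
                *(itertools.repeat(a, n) for a, n in algorithms.items())
            )
        )

assert stable == {"@alice:remote": {"DEV1": "signed_curve25519"}}
assert unstable == {
    "@alice:remote": {
        "DEV1": ["signed_curve25519", "signed_curve25519", "other_algo"]
    }
}
```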
@@ -279,15 +327,11 @@ class FederationClient(FederationBase):
         logger.debug("backfill transaction_data=%r", transaction_data)

         if not isinstance(transaction_data, dict):
-            # TODO we probably want an exception type specific to federation
-            # client validation.
-            raise TypeError("Backfill transaction_data is not a dict.")
+            raise InvalidResponseError("Backfill transaction_data is not a dict.")

         transaction_data_pdus = transaction_data.get("pdus")
         if not isinstance(transaction_data_pdus, list):
-            # TODO we probably want an exception type specific to federation
-            # client validation.
-            raise TypeError("transaction_data.pdus is not a list.")
+            raise InvalidResponseError("transaction_data.pdus is not a list.")

         room_version = await self.store.get_room_version(room_id)

@@ -759,43 +803,6 @@ class FederationClient(FederationBase):

         return signed_auth

-    def _is_unknown_endpoint(
-        self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
-    ) -> bool:
-        """
-        Returns true if the response was due to an endpoint being unimplemented.
-
-        Args:
-            e: The error response received from the remote server.
-            synapse_error: The above error converted to a SynapseError. This is
-                automatically generated if not provided.
-
-        """
-        if synapse_error is None:
-            synapse_error = e.to_synapse_error()
-        # MSC3743 specifies that servers should return a 404 or 405 with an errcode
-        # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
-        # to an unknown method, respectively.
-        #
-        # Older versions of servers don't properly handle this. This needs to be
-        # rather specific as some endpoints truly do return 404 errors.
-        return (
-            # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
-            (e.code == 404 or e.code == 405)
-            and (
-                # Older Dendrites returned a text or empty body.
-                # Older Conduit returned an empty body.
-                not e.response
-                or e.response == b"404 page not found"
-                # The proper response JSON with M_UNRECOGNIZED errcode.
-                or synapse_error.errcode == Codes.UNRECOGNIZED
-            )
-        ) or (
-            # Older Synapses returned a 400 error.
-            e.code == 400
-            and synapse_error.errcode == Codes.UNRECOGNIZED
-        )
-
     async def _try_destination_list(
         self,
         description: str,
@@ -847,7 +854,7 @@ class FederationClient(FederationBase):

         for destination in destinations:
             # We don't want to ask our own server for information we don't have
-            if destination == self.server_name:
+            if self._is_mine_server_name(destination):
                 continue

             try:

@@ -887,7 +894,7 @@ class FederationClient(FederationBase):
                 elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes:
                     failover = True

-                elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
+                elif failover_on_unknown_endpoint and is_unknown_endpoint(
                     e, synapse_error
                 ):
                     failover = True

@@ -1223,7 +1230,7 @@ class FederationClient(FederationBase):
             # If an error is received that is due to an unrecognised endpoint,
             # fallback to the v1 endpoint. Otherwise, consider it a legitimate error
             # and raise.
-            if not self._is_unknown_endpoint(e):
+            if not is_unknown_endpoint(e):
                 raise

         logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

@@ -1297,7 +1304,7 @@ class FederationClient(FederationBase):
             # fallback to the v1 endpoint if the room uses old-style event IDs.
             # Otherwise, consider it a legitimate error and raise.
             err = e.to_synapse_error()
-            if self._is_unknown_endpoint(e, err):
+            if is_unknown_endpoint(e, err):
                 if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
                     raise SynapseError(
                         400,

@@ -1358,7 +1365,7 @@ class FederationClient(FederationBase):
             # If an error is received that is due to an unrecognised endpoint,
             # fallback to the v1 endpoint. Otherwise, consider it a legitimate error
             # and raise.
-            if not self._is_unknown_endpoint(e):
+            if not is_unknown_endpoint(e):
                 raise

         logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")

@@ -1529,7 +1536,7 @@ class FederationClient(FederationBase):
         self, destinations: Iterable[str], room_id: str, event_dict: JsonDict
     ) -> None:
         for destination in destinations:
-            if destination == self.server_name:
+            if self._is_mine_server_name(destination):
                 continue

             try:

@@ -1629,7 +1636,7 @@ class FederationClient(FederationBase):
             # If an error is received that is due to an unrecognised endpoint,
             # fallback to the unstable endpoint. Otherwise, consider it a
             # legitimate error and raise.
-            if not self._is_unknown_endpoint(e):
+            if not is_unknown_endpoint(e):
                 raise

             logger.debug(
@@ -86,7 +86,7 @@ from synapse.storage.databases.main.lock import Lock
 from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
 from synapse.storage.roommember import MemberSummary
 from synapse.types import JsonDict, StateMap, get_domain_from_id
-from synapse.util import json_decoder, unwrapFirstError
+from synapse.util import unwrapFirstError
 from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.util.stringutils import parse_server_name

@@ -129,12 +129,14 @@ class FederationServer(FederationBase):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)

+        self.server_name = hs.hostname
         self.handler = hs.get_federation_handler()
-        self._spam_checker = hs.get_spam_checker()
+        self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
         self._federation_event_handler = hs.get_federation_event_handler()
         self.state = hs.get_state_handler()
         self._event_auth_handler = hs.get_event_auth_handler()
         self._room_member_handler = hs.get_room_member_handler()
+        self._e2e_keys_handler = hs.get_e2e_keys_handler()

         self._state_storage_controller = hs.get_storage_controllers().state

@@ -941,7 +943,7 @@ class FederationServer(FederationBase):
             authorising_server = get_domain_from_id(
                 event.content[EventContentFields.AUTHORISING_USER]
             )
-            if authorising_server != self.server_name:
+            if not self._is_mine_server_name(authorising_server):
                 raise SynapseError(
                     400,
                     f"Cannot authorise request from resident server: {authorising_server}",

@@ -1004,23 +1006,19 @@ class FederationServer(FederationBase):

     @trace
     async def on_claim_client_keys(
-        self, origin: str, content: JsonDict
+        self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool
     ) -> Dict[str, Any]:
-        query = []
-        for user_id, device_keys in content.get("one_time_keys", {}).items():
-            for device_id, algorithm in device_keys.items():
-                query.append((user_id, device_id, algorithm))
-
         log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
-        results = await self.store.claim_e2e_one_time_keys(query)
+        results = await self._e2e_keys_handler.claim_local_one_time_keys(
+            query, always_include_fallback_keys=always_include_fallback_keys
+        )

-        json_result: Dict[str, Dict[str, dict]] = {}
-        for user_id, device_keys in results.items():
-            for device_id, keys in device_keys.items():
-                for key_id, json_str in keys.items():
-                    json_result.setdefault(user_id, {})[device_id] = {
-                        key_id: json_decoder.decode(json_str)
-                    }
+        json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+        for result in results:
+            for user_id, device_keys in result.items():
+                for device_id, keys in device_keys.items():
+                    for key_id, key in keys.items():
+                        json_result.setdefault(user_id, {})[device_id] = {key_id: key}

         logger.info(
             "Claimed one-time-keys: %s",

@@ -1129,7 +1127,7 @@ class FederationServer(FederationBase):
             logger.warning("event id %s: %s", pdu.event_id, e)
             raise FederationError("ERROR", 403, str(e), affected=pdu.event_id)

-        if await self._spam_checker.should_drop_federated_event(pdu):
+        if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu):
             logger.warning(
                 "Unstaged federated event contains spam, dropping %s", pdu.event_id
             )

@@ -1174,7 +1172,9 @@ class FederationServer(FederationBase):

             origin, event = next

-            if await self._spam_checker.should_drop_federated_event(event):
+            if await self._spam_checker_module_callbacks.should_drop_federated_event(
+                event
+            ):
                 logger.warning(
                     "Staged federated event contains spam, dropping %s",
                     event.event_id,


@@ -68,6 +68,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
         self.is_mine_id = hs.is_mine_id
+        self.is_mine_server_name = hs.is_mine_server_name

         # We may have multiple federation sender instances, so we need to track
         # their positions separately.

@@ -198,7 +199,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
         key: Optional[Hashable] = None,
     ) -> None:
         """As per FederationSender"""
-        if destination == self.server_name:
+        if self.is_mine_server_name(destination):
             logger.info("Not sending EDU to ourselves")
             return

@@ -244,7 +245,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):

         self.notifier.on_new_replication_data()

-    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
+    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
         """As per FederationSender"""
         # We don't need to replicate this as it gets sent down a different
         # stream.
@@ -11,6 +11,119 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+The Federation Sender is responsible for sending Persistent Data Units (PDUs)
+and Ephemeral Data Units (EDUs) to other homeservers using
+the `/send` Federation API.
+
+
+## How do PDUs get sent?
+
+The Federation Sender is made aware of new PDUs due to `FederationSender.notify_new_events`.
+When the sender is notified about a newly-persisted PDU that originates from this homeserver
+and is not an out-of-band event, we pass the PDU to the `_PerDestinationQueue` for each
+remote homeserver that is in the room at that point in the DAG.
+
+
+### Per-Destination Queues
+
+There is one `PerDestinationQueue` per 'destination' homeserver.
+The `PerDestinationQueue` maintains the following information about the destination:
+
+- whether the destination is currently in [catch-up mode (see below)](#catch-up-mode);
+- a queue of PDUs to be sent to the destination; and
+- a queue of EDUs to be sent to the destination (not considered in this section).
+
+Upon a new PDU being enqueued, `attempt_new_transaction` is called to start a new
+transaction if there is not already one in progress.
+
+
+### Transactions and the Transaction Transmission Loop
+
+Each federation HTTP request to the `/send` endpoint is referred to as a 'transaction'.
+The body of the HTTP request contains a list of PDUs and EDUs to send to the destination.
+
+The *Transaction Transmission Loop* (`_transaction_transmission_loop`) is responsible
+for emptying the queued PDUs (and EDUs) from a `PerDestinationQueue` by sending
+them to the destination.
+
+There can only be one transaction in flight for a given destination at any time.
+(Other than preventing us from overloading the destination, this also makes it easier to
+reason about because we process events sequentially for each destination.
+This is useful for *Catch-Up Mode*, described later.)
+
+The loop continues so long as there is anything to send. At each iteration of the loop, we:
+
+- dequeue up to 50 PDUs (and up to 100 EDUs).
+- make the `/send` request to the destination homeserver with the dequeued PDUs and EDUs.
+- if successful, make note of the fact that we succeeded in transmitting PDUs up to
+  the given `stream_ordering` of the latest PDU, by advancing the destination's
+  `last_successful_stream_ordering`.
+- if unsuccessful, back off from the remote homeserver for some time.
+  If we have been unsuccessful for too long (when the backoff interval grows to exceed 1 hour),
+  the in-memory queues are emptied and we enter [*Catch-Up Mode*, described below](#catch-up-mode).
+
+
+### Catch-Up Mode
+
+When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
+(`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
+(Only once catch-up mode has been exited can the regular transaction transmission behaviour
+be resumed.)
+
+*Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
+connection problems, is responsible for sending PDUs that have been missed by the destination
+homeserver. (PDUs can be missed because the `PerDestinationQueue` is volatile — i.e. resets
+on startup — and it does not hold PDUs forever if `/send` requests to the destination fail.)
+
+The catch-up mechanism makes use of the `last_successful_stream_ordering` column in the
+`destinations` table (which gives the `stream_ordering` of the most recent successfully
+sent PDU) and the `stream_ordering` column in the `destination_rooms` table (which gives,
+for each room, the `stream_ordering` of the most recent PDU that needs to be sent to this
+destination).
+
+Each iteration of the loop pulls out 50 `destination_rooms` entries with the oldest
+`stream_ordering`s that are greater than the `last_successful_stream_ordering`.
+In other words, from the set of latest PDUs in each room to be sent to the destination,
+the 50 oldest such PDUs are pulled out.
+
+These PDUs could, in principle, now be directly sent to the destination. However, as an
+optimisation intended to prevent overloading destination homeservers, we instead attempt
+to send the latest forward extremities so long as the destination homeserver is still
+eligible to receive those.
+This reduces load on the destination **in aggregate** because all Synapse homeservers
+will behave according to this principle and therefore avoid sending lots of different PDUs
+at different points in the DAG to a recovering homeserver.
+*This optimisation is not currently valid in rooms which are partial-state on this homeserver,
+since we are unable to determine whether the destination homeserver is eligible to receive
+the latest forward extremities unless this homeserver sent those PDUs — in this case, we
+just send the latest PDUs originating from this server and skip this optimisation.*
+
+Whilst PDUs are sent through this mechanism, the position of `last_successful_stream_ordering`
+is advanced as normal.
+Once there are no longer any rooms containing outstanding PDUs to be sent to the destination
+*that are not already in the `PerDestinationQueue` because they arrived since Catch-Up Mode
+was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmission_loop`.
+
+
+#### A note on failures and back-offs
+
+If a remote server is unreachable over federation, we back off from that server,
+with an exponentially-increasing retry interval.
+We don't automatically retry after the interval, but we prevent making new attempts
+until such time as the back-off has cleared.
+Once the back-off is cleared and a new PDU or EDU arrives for transmission, the transmission
+loop resumes and empties the queue by making federation requests.
+
+If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent
+unbounded growth) and Catch-Up Mode is entered.
+
+It is worth noting that the back-off for a remote server is cleared once an inbound
+request from that remote server is received (see `notify_remote_server_up`).
+At this point, the transaction transmission loop is also started up, to proactively
+send missed PDUs and EDUs to the destination (i.e. you don't need to wait for a new PDU
+or EDU, destined for that destination, to be created in order to send out missed PDUs and
+EDUs).
+"""

 import abc
 import logging
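A highly simplified, hypothetical sketch of the transmission-loop shape the docstring describes (no real Synapse types; asyncio stands in for Twisted, and PDUs are represented by event-ID strings):

```python
import asyncio
from collections import deque
from typing import Deque, List

MAX_PDUS_PER_TXN = 50  # matches the "up to 50 PDUs" rule above

class PerDestinationQueue:
    def __init__(self, destination: str) -> None:
        self.destination = destination
        self.pending_pdus: Deque[str] = deque()
        self._looping = False

    def enqueue(self, pdu: str) -> None:
        self.pending_pdus.append(pdu)
        self.attempt_new_transaction()

    def attempt_new_transaction(self) -> None:
        # Only one transaction in flight per destination at any time.
        if not self._looping:
            self._looping = True
            asyncio.ensure_future(self._transaction_transmission_loop())

    async def _transaction_transmission_loop(self) -> None:
        try:
            # Continue so long as there is anything to send.
            while self.pending_pdus:
                n = min(MAX_PDUS_PER_TXN, len(self.pending_pdus))
                batch = [self.pending_pdus.popleft() for _ in range(n)]
                await self._send_transaction(batch)
        finally:
            self._looping = False

    async def _send_transaction(self, pdus: List[str]) -> None:
        # Placeholder for the `/send` HTTP request (and for the
        # success/back-off handling described in the docstring).
        await asyncio.sleep(0)

async def demo() -> None:
    q = PerDestinationQueue("remote.example")
    for i in range(120):
        q.enqueue(f"$event{i}")
    await asyncio.sleep(0.1)  # let the loop drain the queue

# asyncio.run(demo())
```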
@@ -249,6 +362,7 @@ class FederationSender(AbstractFederationSender):

         self.clock = hs.get_clock()
         self.is_mine_id = hs.is_mine_id
+        self.is_mine_server_name = hs.is_mine_server_name

         self._presence_router: Optional["PresenceRouter"] = None
         self._transaction_manager = TransactionManager(hs)

@@ -653,7 +767,7 @@ class FederationSender(AbstractFederationSender):
         domains = [
             d
             for d in domains_set
-            if d != self.server_name
+            if not self.is_mine_server_name(d)
             and self._federation_shard_config.should_handle(self._instance_name, d)
         ]
         if not domains:

@@ -719,7 +833,7 @@ class FederationSender(AbstractFederationSender):
         assert self.is_mine_id(state.user_id)

         for destination in destinations:
-            if destination == self.server_name:
+            if self.is_mine_server_name(destination):
                 continue
             if not self._federation_shard_config.should_handle(
                 self._instance_name, destination

@@ -747,7 +861,7 @@ class FederationSender(AbstractFederationSender):
             content: content of EDU
             key: clobbering key for this edu
         """
-        if destination == self.server_name:
+        if self.is_mine_server_name(destination):
             logger.info("Not sending EDU to ourselves")
             return

@@ -783,8 +897,8 @@ class FederationSender(AbstractFederationSender):
         else:
             queue.send_edu(edu)

-    def send_device_messages(self, destination: str, immediate: bool = False) -> None:
-        if destination == self.server_name:
+    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+        if self.is_mine_server_name(destination):
             logger.warning("Not sending device update to ourselves")
             return

@@ -806,7 +920,7 @@ class FederationSender(AbstractFederationSender):
         might have come back.
         """

-        if destination == self.server_name:
+        if self.is_mine_server_name(destination):
             logger.warning("Not waking up ourselves")
             return


@@ -497,8 +497,8 @@ class PerDestinationQueue:
         #
         # Note: `catchup_pdus` will have exactly one PDU per room.
         for pdu in catchup_pdus:
-            # The PDU from the DB will be the last PDU in the room from
-            # *this server* that wasn't sent to the remote. However, other
+            # The PDU from the DB will be the newest PDU in the room from
+            # *this server* that we tried---but were unable---to send to the remote. However, other
             # servers may have sent lots of events since then, and we want
             # to try and tell the remote only about the *latest* events in
             # the room. This is so that it doesn't get inundated by events

@@ -516,6 +516,11 @@ class PerDestinationQueue:
                 # If the event is in the extremities, then great! We can just
                 # use that without having to do further checks.
                 room_catchup_pdus = [pdu]
+            elif await self._store.is_partial_state_room(pdu.room_id):
+                # We can't be sure which events the destination should
+                # see using only partial state. Avoid doing so, and just retry
+                # sending out the newest PDU the remote is missing from us.
+                room_catchup_pdus = [pdu]
             else:
                 # If not, fetch the extremities and figure out which we can
                 # send.

@@ -547,6 +552,8 @@ class PerDestinationQueue:
                     self._server_name,
                     new_pdus,
                     redact=False,
+                    filter_out_erased_senders=True,
+                    filter_out_remote_partial_state_events=True,
                 )

                 # If we've filtered out all the extremities, fall back to
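A hedged sketch of the catch-up branch added above, with the store and PDU objects faked so it runs standalone: partial-state rooms skip the forward-extremity optimisation and simply resend the newest missed PDU.

```python
import asyncio
from typing import List, Set

class FakeStore:
    def __init__(self, partial_rooms: Set[str]) -> None:
        self._partial = partial_rooms

    async def is_partial_state_room(self, room_id: str) -> bool:
        return room_id in self._partial

    async def get_forward_extremities(self, room_id: str) -> List[str]:
        return ["$ext1", "$ext2"]

class FakePdu:
    def __init__(self, event_id: str, room_id: str) -> None:
        self.event_id = event_id
        self.room_id = room_id

async def choose_catchup_pdus(
    pdu: FakePdu, store: FakeStore, extremity_ids: List[str]
) -> List[str]:
    """Decide which PDUs to send for one room during catch-up."""
    if pdu.event_id in extremity_ids:
        # Newest missed PDU is still an extremity: send it directly.
        return [pdu.event_id]
    if await store.is_partial_state_room(pdu.room_id):
        # Partial state: can't judge what the remote may see, so
        # fall back to just resending the newest missed PDU.
        return [pdu.event_id]
    # Otherwise, prefer the room's current forward extremities
    # (filtered for what the destination may receive).
    return await store.get_forward_extremities(pdu.room_id)

assert asyncio.run(
    choose_catchup_pdus(FakePdu("$old", "!r"), FakeStore({"!r"}), [])
) == ["$old"]
```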
@@ -16,6 +16,7 @@
 import logging
 import urllib
 from typing import (
+    TYPE_CHECKING,
     Any,
     Callable,
     Collection,

@@ -42,21 +43,24 @@ from synapse.api.urls import (
 )
 from synapse.events import EventBase, make_event_from_dict
 from synapse.federation.units import Transaction
-from synapse.http.matrixfederationclient import ByteParser
+from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser
 from synapse.http.types import QueryParams
 from synapse.types import JsonDict
 from synapse.util import ExceptionBundle

+if TYPE_CHECKING:
+    from synapse.app.homeserver import HomeServer
+
 logger = logging.getLogger(__name__)


 class TransportLayerClient:
     """Sends federation HTTP requests to other servers"""

-    def __init__(self, hs):
-        self.server_name = hs.hostname
+    def __init__(self, hs: "HomeServer"):
         self.client = hs.get_federation_http_client()
         self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled
+        self._is_mine_server_name = hs.is_mine_server_name

     async def get_room_state_ids(
         self, destination: str, room_id: str, event_id: str

@@ -133,7 +137,7 @@ class TransportLayerClient:

     async def backfill(
         self, destination: str, room_id: str, event_tuples: Collection[str], limit: int
-    ) -> Optional[JsonDict]:
+    ) -> Optional[Union[JsonDict, list]]:
         """Requests `limit` previous PDUs in a given context before list of
         PDUs.

@@ -231,7 +235,7 @@ class TransportLayerClient:
             transaction.transaction_id,
         )

-        if transaction.destination == self.server_name:
+        if self._is_mine_server_name(transaction.destination):
             raise RuntimeError("Transport layer cannot send to itself!")

         # FIXME: This is only used by the tests. The actual json sent is

@@ -388,6 +392,7 @@ class TransportLayerClient:
             # server was just having a momentary blip, the room will be out of
             # sync.
             ignore_backoff=True,
+            parser=LegacyJsonSendParser(),
         )

     async def send_leave_v2(

@@ -445,7 +450,11 @@ class TransportLayerClient:
         path = _create_v1_path("/invite/%s/%s", room_id, event_id)

         return await self.client.put_json(
-            destination=destination, path=path, data=content, ignore_backoff=True
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+            parser=LegacyJsonSendParser(),
         )

     async def send_invite_v2(

@@ -641,10 +650,10 @@ class TransportLayerClient:

         Response:
             {
-                "device_keys": {
+                "one_time_keys": {
                     "<user_id>": {
                         "<device_id>": {
-                            "<algorithm>:<key_id>": "<key_base64>"
+                            "<algorithm>:<key_id>": <OTK JSON>
                         }
                     }
                 }

@@ -660,7 +669,50 @@ class TransportLayerClient:
         path = _create_v1_path("/user/keys/claim")

         return await self.client.post_json(
-            destination=destination, path=path, data=query_content, timeout=timeout
+            destination=destination,
+            path=path,
+            data={"one_time_keys": query_content},
+            timeout=timeout,
         )

+    async def claim_client_keys_unstable(
+        self, destination: str, query_content: JsonDict, timeout: Optional[int]
+    ) -> JsonDict:
+        """Claim one-time keys for a list of devices hosted on a remote server.
+
+        Request:
+            {
+                "one_time_keys": {
+                    "<user_id>": {
+                        "<device_id>": {"<algorithm>": <count>}
+                    }
+                }
+            }
+
+        Response:
+            {
+                "one_time_keys": {
+                    "<user_id>": {
+                        "<device_id>": {
+                            "<algorithm>:<key_id>": <OTK JSON>
+                        }
+                    }
+                }
+            }
+
+        Args:
+            destination: The server to query.
+            query_content: The user ids to query.
+        Returns:
+            A dict containing the one-time keys.
+        """
+        path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/user/keys/claim")
+
+        return await self.client.post_json(
+            destination=destination,
+            path=path,
+            data={"one_time_keys": query_content},
+            timeout=timeout,
+        )
+
     async def get_missing_events(
@@ -25,6 +25,7 @@ from synapse.federation.transport.server._base import (
 from synapse.federation.transport.server.federation import (
     FEDERATION_SERVLET_CLASSES,
     FederationAccountStatusServlet,
+    FederationUnstableClientKeysClaimServlet,
 )
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import (

@@ -108,6 +109,7 @@ class PublicRoomList(BaseFederationServlet):
     """

     PATH = "/publicRooms"
+    CATEGORY = "Federation requests"

     def __init__(
         self,

@@ -212,6 +214,7 @@ class OpenIdUserInfo(BaseFederationServlet):
     """

     PATH = "/openid/userinfo"
+    CATEGORY = "Federation requests"

     REQUIRE_AUTH = False

@@ -296,6 +299,11 @@ def register_servlets(
             and not hs.config.experimental.msc3720_enabled
         ):
             continue
+        if (
+            servletclass == FederationUnstableClientKeysClaimServlet
+            and not hs.config.experimental.msc3983_appservice_otk_claims
+        ):
+            continue

         servletclass(
             hs=hs,


@@ -57,6 +57,7 @@ class Authenticator:
         self._clock = hs.get_clock()
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
+        self._is_mine_server_name = hs.is_mine_server_name
         self.store = hs.get_datastores().main
         self.federation_domain_whitelist = (
             hs.config.federation.federation_domain_whitelist

@@ -100,7 +101,9 @@ class Authenticator:
             json_request["signatures"].setdefault(origin, {})[key] = sig

         # if the origin_server sent a destination along it needs to match our own server_name
-        if destination is not None and destination != self.server_name:
+        if destination is not None and not self._is_mine_server_name(
+            destination
+        ):
             raise AuthenticationError(
                 HTTPStatus.UNAUTHORIZED,
                 "Destination mismatch in auth header",
@@ -12,6 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from collections import Counter
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Dict,
|
||||
@@ -70,6 +71,7 @@ class BaseFederationServerServlet(BaseFederationServlet):
|
||||
|
||||
class FederationSendServlet(BaseFederationServerServlet):
|
||||
PATH = "/send/(?P<transaction_id>[^/]*)/?"
|
||||
CATEGORY = "Inbound federation transaction request"
|
||||
|
||||
# We ratelimit manually in the handler as we queue up the requests and we
|
||||
# don't want to fill up the ratelimiter with blocked requests.
|
||||
@@ -138,6 +140,7 @@ class FederationSendServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationEventServlet(BaseFederationServerServlet):
|
||||
PATH = "/event/(?P<event_id>[^/]*)/?"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
# This is when someone asks for a data item for a given server data_id pair.
|
||||
async def on_GET(
|
||||
@@ -152,6 +155,7 @@ class FederationEventServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationStateV1Servlet(BaseFederationServerServlet):
|
||||
PATH = "/state/(?P<room_id>[^/]*)/?"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
# This is when someone asks for all data for a given room.
|
||||
async def on_GET(
|
||||
@@ -170,6 +174,7 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationStateIdsServlet(BaseFederationServerServlet):
|
||||
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -187,6 +192,7 @@ class FederationStateIdsServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationBackfillServlet(BaseFederationServerServlet):
|
||||
PATH = "/backfill/(?P<room_id>[^/]*)/?"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -225,6 +231,7 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
|
||||
"""
|
||||
|
||||
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -246,6 +253,7 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationQueryServlet(BaseFederationServerServlet):
|
||||
PATH = "/query/(?P<query_type>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
# This is when we receive a server-server Query
|
||||
async def on_GET(
|
||||
@@ -262,6 +270,7 @@ class FederationQueryServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationMakeJoinServlet(BaseFederationServerServlet):
|
||||
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -297,6 +306,7 @@ class FederationMakeJoinServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationMakeLeaveServlet(BaseFederationServerServlet):
|
||||
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -312,6 +322,7 @@ class FederationMakeLeaveServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
|
||||
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_PUT(
|
||||
self,
|
||||
@@ -327,6 +338,7 @@ class FederationV1SendLeaveServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
|
||||
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
PREFIX = FEDERATION_V2_PREFIX
|
||||
|
||||
@@ -344,6 +356,7 @@ class FederationV2SendLeaveServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationMakeKnockServlet(BaseFederationServerServlet):
|
||||
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -366,6 +379,7 @@ class FederationMakeKnockServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV1SendKnockServlet(BaseFederationServerServlet):
|
||||
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_PUT(
|
||||
self,
|
||||
@@ -381,6 +395,7 @@ class FederationV1SendKnockServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationEventAuthServlet(BaseFederationServerServlet):
|
||||
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -395,6 +410,7 @@ class FederationEventAuthServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV1SendJoinServlet(BaseFederationServerServlet):
|
||||
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_PUT(
|
||||
self,
|
||||
@@ -412,6 +428,7 @@ class FederationV1SendJoinServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV2SendJoinServlet(BaseFederationServerServlet):
|
||||
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
PREFIX = FEDERATION_V2_PREFIX
|
||||
|
||||
@@ -455,6 +472,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV1InviteServlet(BaseFederationServerServlet):
|
||||
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_PUT(
|
||||
self,
|
||||
@@ -479,6 +497,7 @@ class FederationV1InviteServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationV2InviteServlet(BaseFederationServerServlet):
|
||||
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
PREFIX = FEDERATION_V2_PREFIX
|
||||
|
||||
@@ -515,6 +534,7 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
|
||||
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_PUT(
|
||||
self,
|
||||
@@ -529,6 +549,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
|
||||
PATH = "/user/keys/query"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_POST(
|
||||
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
|
||||
@@ -538,6 +559,7 @@ class FederationClientKeysQueryServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
|
||||
PATH = "/user/devices/(?P<user_id>[^/]*)"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
@@ -551,16 +573,54 @@ class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
|
||||
|
||||
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
|
||||
PATH = "/user/keys/claim"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_POST(
|
||||
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
|
||||
) -> Tuple[int, JsonDict]:
|
||||
response = await self.handler.on_claim_client_keys(origin, content)
|
||||
# Generate a count for each algorithm, which is hard-coded to 1.
|
||||
key_query: List[Tuple[str, str, str, int]] = []
|
||||
for user_id, device_keys in content.get("one_time_keys", {}).items():
|
||||
for device_id, algorithm in device_keys.items():
|
||||
key_query.append((user_id, device_id, algorithm, 1))
|
||||
|
||||
response = await self.handler.on_claim_client_keys(
|
||||
key_query, always_include_fallback_keys=False
|
||||
)
|
||||
return 200, response
|
||||
|
||||
|
||||
class FederationUnstableClientKeysClaimServlet(BaseFederationServerServlet):
|
||||
"""
|
||||
Identical to the stable endpoint (FederationClientKeysClaimServlet) except
|
||||
it allows for querying for multiple OTKs at once and always includes fallback
|
||||
keys in the response.
|
||||
"""
|
||||
|
||||
PREFIX = FEDERATION_UNSTABLE_PREFIX
|
||||
PATH = "/user/keys/claim"
|
||||
CATEGORY = "Federation requests"
|
||||
|
||||
async def on_POST(
|
||||
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
|
||||
) -> Tuple[int, JsonDict]:
|
||||
# Generate a count for each algorithm.
|
||||
key_query: List[Tuple[str, str, str, int]] = []
|
||||
for user_id, device_keys in content.get("one_time_keys", {}).items():
|
||||
for device_id, algorithms in device_keys.items():
|
||||
counts = Counter(algorithms)
|
||||
for algorithm, count in counts.items():
|
||||
key_query.append((user_id, device_id, algorithm, count))
|
||||
|
||||
response = await self.handler.on_claim_client_keys(
|
||||
key_query, always_include_fallback_keys=True
|
||||
)
|
||||
return 200, response
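
The two claim servlets above differ only in the request shape and the counting step. As a standalone illustration (plain Python, not Synapse's code; the request bodies are assumed to follow the formats handled in the two on_POST bodies above):

from collections import Counter
from typing import Dict, List, Tuple

KeyQuery = List[Tuple[str, str, str, int]]  # (user ID, device ID, algorithm, count)

def stable_key_query(one_time_keys: Dict[str, Dict[str, str]]) -> KeyQuery:
    # Stable endpoint: each device maps to a single algorithm name and the
    # count is hard-coded to 1.
    return [
        (user_id, device_id, algorithm, 1)
        for user_id, devices in one_time_keys.items()
        for device_id, algorithm in devices.items()
    ]

def unstable_key_query(one_time_keys: Dict[str, Dict[str, List[str]]]) -> KeyQuery:
    # Unstable endpoint: each device maps to a list of algorithms; repeated
    # entries collapse into a per-algorithm count.
    return [
        (user_id, device_id, algorithm, count)
        for user_id, devices in one_time_keys.items()
        for device_id, algorithms in devices.items()
        for algorithm, count in Counter(algorithms).items()
    ]

print(stable_key_query({"@alice:example.org": {"DEV1": "signed_curve25519"}}))
# [('@alice:example.org', 'DEV1', 'signed_curve25519', 1)]
print(unstable_key_query({"@alice:example.org": {"DEV1": ["signed_curve25519"] * 3}}))
# [('@alice:example.org', 'DEV1', 'signed_curve25519', 3)]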


 class FederationGetMissingEventsServlet(BaseFederationServerServlet):
     PATH = "/get_missing_events/(?P<room_id>[^/]*)"
+    CATEGORY = "Federation requests"

     async def on_POST(
         self,
@@ -586,6 +646,7 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet):

 class On3pidBindServlet(BaseFederationServerServlet):
     PATH = "/3pid/onbind"
+    CATEGORY = "Federation requests"

     REQUIRE_AUTH = False

@@ -618,6 +679,7 @@ class On3pidBindServlet(BaseFederationServerServlet):

 class FederationVersionServlet(BaseFederationServlet):
     PATH = "/version"
+    CATEGORY = "Federation requests"

     REQUIRE_AUTH = False

@@ -640,6 +702,7 @@ class FederationVersionServlet(BaseFederationServlet):

 class FederationRoomHierarchyServlet(BaseFederationServlet):
     PATH = "/hierarchy/(?P<room_id>[^/]*)"
+    CATEGORY = "Federation requests"

     def __init__(
         self,
@@ -672,6 +735,7 @@ class RoomComplexityServlet(BaseFederationServlet):

     PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
     PREFIX = FEDERATION_UNSTABLE_PREFIX
+    CATEGORY = "Federation requests (unstable)"

     def __init__(
         self,
@@ -757,6 +821,7 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationClientKeysQueryServlet,
     FederationUserDevicesQueryServlet,
     FederationClientKeysClaimServlet,
+    FederationUnstableClientKeysClaimServlet,
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
     FederationVersionServlet,
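
For context, each entry in this tuple contributes a PREFIX + PATH regex that incoming federation requests are matched against. A reduced, hypothetical sketch of that composition (illustrative names only, not Synapse's registration code):

import re

FEDERATION_V1_PREFIX = "/_matrix/federation/v1"

class ServletSketch:
    PREFIX = FEDERATION_V1_PREFIX  # subclasses may override (v2/unstable)
    PATH = ""

    @classmethod
    def full_pattern(cls) -> "re.Pattern[str]":
        # PREFIX + PATH, anchored; PATH's named groups become URL parameters.
        return re.compile("^" + cls.PREFIX + cls.PATH + "$")

class SendLeaveSketch(ServletSketch):
    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

m = SendLeaveSketch.full_pattern().match(
    "/_matrix/federation/v1/send_leave/!room:example.org/$abc123"
)
assert m is not None
print(m.group("room_id"), m.group("event_id"))  # !room:example.org $abc123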

synapse/handlers/account_validity.py

@@ -15,9 +15,7 @@
 import email.mime.multipart
 import email.utils
 import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
-
-from twisted.web.http import Request
+from typing import TYPE_CHECKING, List, Optional, Tuple

 from synapse.api.errors import AuthError, StoreError, SynapseError
 from synapse.metrics.background_process_metrics import wrap_as_background_process
@@ -30,25 +28,17 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

-# Types for callbacks to be registered via the module api
-IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
-ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
-# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
-# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
-ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
-ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
-ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
-

 class AccountValidityHandler:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.config = hs.config
-        self.store = self.hs.get_datastores().main
-        self.send_email_handler = self.hs.get_send_email_handler()
-        self.clock = self.hs.get_clock()
+        self.store = hs.get_datastores().main
+        self.send_email_handler = hs.get_send_email_handler()
+        self.clock = hs.get_clock()

-        self._app_name = self.hs.config.email.email_app_name
+        self._app_name = hs.config.email.email_app_name
+        self._module_api_callbacks = hs.get_module_api_callbacks().account_validity

         self._account_validity_enabled = (
             hs.config.account_validity.account_validity_enabled
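
The new `_module_api_callbacks` container is what modules now populate instead of calling into the handler directly. A minimal sketch of a module wiring into these hooks, assuming Synapse's documented ModuleApi.register_account_validity_callbacks method (the _FakeModuleApi stand-in is hypothetical, only there so the sketch runs on its own):

from typing import Optional

class MyAccountValidityModule:
    # Sketch of a module wiring into the account-validity hooks. `api` is
    # assumed to be Synapse's ModuleApi, whose register_account_validity_callbacks
    # method feeds the callbacks container used by the handler above.
    def __init__(self, config: dict, api) -> None:
        self._api = api
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
            on_user_registration=self.on_user_registration,
        )

    async def is_user_expired(self, user_id: str) -> Optional[bool]:
        # Return True/False to decide, or None to defer to other callbacks.
        return None

    async def on_user_registration(self, user_id: str) -> None:
        # Called for each newly registered user.
        pass

class _FakeModuleApi:
    # Hypothetical stand-in so the sketch runs outside a homeserver.
    def register_account_validity_callbacks(self, **callbacks) -> None:
        self.callbacks = {k: v for k, v in callbacks.items() if v is not None}

api = _FakeModuleApi()
MyAccountValidityModule({}, api)
print(sorted(api.callbacks))  # ['is_user_expired', 'on_user_registration']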
@@ -78,69 +68,6 @@ class AccountValidityHandler:
         if hs.config.worker.run_background_tasks:
             self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

-        self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
-        self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
-        self._on_legacy_send_mail_callback: Optional[
-            ON_LEGACY_SEND_MAIL_CALLBACK
-        ] = None
-        self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
-
-        # The legacy admin requests callback isn't a protected attribute because we need
-        # to access it from the admin servlet, which is outside of this handler.
-        self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
-
-    def register_account_validity_callbacks(
-        self,
-        is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
-        on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
-        on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
-        on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
-        on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
-    ) -> None:
-        """Register callbacks from module for each hook."""
-        if is_user_expired is not None:
-            self._is_user_expired_callbacks.append(is_user_expired)
-
-        if on_user_registration is not None:
-            self._on_user_registration_callbacks.append(on_user_registration)
-
-        # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
-        # an admin one). As part of moving the feature into a module, we need to change
-        # the path from /_matrix/client/unstable/account_validity/... to
-        # /_synapse/client/account_validity, because:
-        #
-        # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
-        # * the way we register servlets means that modules can't register resources
-        #   under /_matrix/client
-        #
-        # We need to allow for a transition period between the old and new endpoints
-        # in order to allow for clients to update (and for emails to be processed).
-        #
-        # Once the email-account-validity module is loaded, it will take control of account
-        # validity by moving the rows from our `account_validity` table into its own table.
-        #
-        # Therefore, we need to allow modules (in practice just the one implementing the
-        # email-based account validity) to temporarily hook into the legacy endpoints so we
-        # can route the traffic coming into the old endpoints into the module, which is
-        # why we have the following three temporary hooks.
-        if on_legacy_send_mail is not None:
-            if self._on_legacy_send_mail_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_send_mail twice")
-
-            self._on_legacy_send_mail_callback = on_legacy_send_mail
-
-        if on_legacy_renew is not None:
-            if self._on_legacy_renew_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_renew twice")
-
-            self._on_legacy_renew_callback = on_legacy_renew
-
-        if on_legacy_admin_request is not None:
-            if self.on_legacy_admin_request_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_admin_request twice")
-
-            self.on_legacy_admin_request_callback = on_legacy_admin_request
-
     async def is_user_expired(self, user_id: str) -> bool:
         """Checks if a user has expired against third-party modules.

@@ -150,7 +77,7 @@ class AccountValidityHandler:
         Returns:
             Whether the user has expired.
         """
-        for callback in self._is_user_expired_callbacks:
+        for callback in self._module_api_callbacks.is_user_expired_callbacks:
            expired = await delay_cancellation(callback(user_id))
            if expired is not None:
                return expired
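
The loop above implements first-answer-wins: callbacks are tried in registration order and the first non-None result is returned. A reduced sketch of that semantic (ignoring Synapse's delay_cancellation wrapper):

import asyncio
from typing import Awaitable, Callable, List, Optional

IsUserExpired = Callable[[str], Awaitable[Optional[bool]]]

async def is_user_expired(callbacks: List[IsUserExpired], user_id: str) -> bool:
    # First callback returning a non-None answer wins; if every callback
    # abstains (returns None), the user is treated as not expired.
    for callback in callbacks:
        expired = await callback(user_id)
        if expired is not None:
            return expired
    return False

async def deny_bob(user_id: str) -> Optional[bool]:
    return True if user_id == "@bob:example.org" else None

print(asyncio.run(is_user_expired([deny_bob], "@bob:example.org")))    # True
print(asyncio.run(is_user_expired([deny_bob], "@alice:example.org")))  # False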
@@ -168,7 +95,7 @@ class AccountValidityHandler:
         Args:
             user_id: The ID of the newly registered user.
         """
-        for callback in self._on_user_registration_callbacks:
+        for callback in self._module_api_callbacks.on_user_registration_callbacks:
             await callback(user_id)

     @wrap_as_background_process("send_renewals")
@@ -198,8 +125,8 @@ class AccountValidityHandler:
         """
         # If a module supports sending a renewal email from here, do that, otherwise do
         # the legacy dance.
-        if self._on_legacy_send_mail_callback is not None:
-            await self._on_legacy_send_mail_callback(user_id)
+        if self._module_api_callbacks.on_legacy_send_mail_callback is not None:
+            await self._module_api_callbacks.on_legacy_send_mail_callback(user_id)
             return

         if not self._account_validity_renew_by_email_enabled:
@@ -336,8 +263,10 @@ class AccountValidityHandler:
         """
         # If a module supports triggering a renew from here, do that, otherwise do the
         # legacy dance.
-        if self._on_legacy_renew_callback is not None:
-            return await self._on_legacy_renew_callback(renewal_token)
+        if self._module_api_callbacks.on_legacy_renew_callback is not None:
+            return await self._module_api_callbacks.on_legacy_renew_callback(
+                renewal_token
+            )

         try:
             (
synapse/handlers/appservice.py

@@ -12,7 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Union
+from typing import (
+    TYPE_CHECKING,
+    Collection,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Tuple,
+    Union,
+)

 from prometheus_client import Counter

@@ -829,3 +839,125 @@ class ApplicationServicesHandler:
         if unknown_user:
             return await self.query_user_exists(user_id)
         return True
+
+    async def claim_e2e_one_time_keys(
+        self, query: Iterable[Tuple[str, str, str, int]]
+    ) -> Tuple[
+        Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
+    ]:
+        """Claim one time keys from application services.
+
+        Users which are exclusively owned by an application service are sent a
+        key claim request to check if the application service provides keys
+        directly.
+
+        Args:
+            query: An iterable of tuples of (user ID, device ID, algorithm).
+
+        Returns:
+            A tuple of:
+                A map of user ID -> a map device ID -> a map of key ID -> JSON.
+
+                A copy of the input which has not been fulfilled (either because
+                they are not appservice users or the appservice does not support
+                providing OTKs).
+        """
+        services = self.store.get_app_services()
+
+        # Partition the users by appservice.
+        query_by_appservice: Dict[str, List[Tuple[str, str, str, int]]] = {}
+        missing = []
+        for user_id, device, algorithm, count in query:
+            if not self.store.get_if_app_services_interested_in_user(user_id):
+                missing.append((user_id, device, algorithm, count))
+                continue
+
+            # Find the associated appservice.
+            for service in services:
+                if service.is_exclusive_user(user_id):
+                    query_by_appservice.setdefault(service.id, []).append(
+                        (user_id, device, algorithm, count)
+                    )
+                    continue
+
+        # Query each service in parallel.
+        results = await make_deferred_yieldable(
+            defer.DeferredList(
+                [
+                    run_in_background(
+                        self.appservice_api.claim_client_keys,
+                        # We know this must be an app service.
+                        self.store.get_app_service_by_id(service_id),  # type: ignore[arg-type]
+                        service_query,
+                    )
+                    for service_id, service_query in query_by_appservice.items()
+                ],
+                consumeErrors=True,
+            )
+        )
+
+        # Patch together the results -- they are all independent (since they
+        # require exclusive control over the users, which is the outermost key).
+        claimed_keys: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+        for success, result in results:
+            if success:
+                claimed_keys.update(result[0])
+                missing.extend(result[1])
+
+        return claimed_keys, missing
+
+    async def query_keys(
+        self, query: Mapping[str, Optional[List[str]]]
+    ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
+        """Query application services for device keys.
+
+        Users which are exclusively owned by an application service are queried
+        for keys to check if the application service provides keys directly.
+
+        Args:
+            query: map from user_id to a list of devices to query
+
+        Returns:
+            A map from user_id -> device_id -> device details
+        """
+        services = self.store.get_app_services()
+
+        # Partition the users by appservice.
+        query_by_appservice: Dict[str, Dict[str, List[str]]] = {}
+        for user_id, device_ids in query.items():
+            if not self.store.get_if_app_services_interested_in_user(user_id):
+                continue
+
+            # Find the associated appservice.
+            for service in services:
+                if service.is_exclusive_user(user_id):
+                    query_by_appservice.setdefault(service.id, {})[user_id] = (
+                        device_ids or []
+                    )
+                    continue
+
+        # Query each service in parallel.
+        results = await make_deferred_yieldable(
+            defer.DeferredList(
+                [
+                    run_in_background(
+                        self.appservice_api.query_keys,
+                        # We know this must be an app service.
+                        self.store.get_app_service_by_id(service_id),  # type: ignore[arg-type]
+                        service_query,
+                    )
+                    for service_id, service_query in query_by_appservice.items()
+                ],
+                consumeErrors=True,
+            )
+        )
+
+        # Patch together the results -- they are all independent (since they
+        # require exclusive control over the users). They get returned as a single
+        # dictionary.
+        key_queries: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+        for success, result in results:
+            if success:
+                key_queries.update(result)
+
+        return key_queries
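
Both new methods follow the same partition, fan-out, merge shape. A simplified, dependency-free sketch of the partition step for key claims (Synapse resolves exclusive ownership via appservice namespace regexes and fans out with a Twisted DeferredList; here ownership is assumed precomputed):

from typing import Dict, List, Tuple

Claim = Tuple[str, str, str, int]  # (user ID, device ID, algorithm, count)

def partition_claims(
    query: List[Claim],
    exclusive_users: Dict[str, str],  # user ID -> owning appservice ID (assumed)
) -> Tuple[Dict[str, List[Claim]], List[Claim]]:
    # Claims for users not exclusively owned by any appservice are returned
    # as "missing" so the caller can fall back to the local device store.
    by_appservice: Dict[str, List[Claim]] = {}
    missing: List[Claim] = []
    for claim in query:
        service_id = exclusive_users.get(claim[0])
        if service_id is None:
            missing.append(claim)
        else:
            by_appservice.setdefault(service_id, []).append(claim)
    return by_appservice, missing

by_as, missing = partition_claims(
    [
        ("@bot1:example.org", "DEV", "signed_curve25519", 2),
        ("@human:example.org", "DEV", "signed_curve25519", 1),
    ],
    {"@bot1:example.org": "my_bridge"},
)
print(by_as)    # {'my_bridge': [('@bot1:example.org', 'DEV', 'signed_curve25519', 2)]}
print(missing)  # [('@human:example.org', 'DEV', 'signed_curve25519', 1)]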

synapse/handlers/auth.py

@@ -212,7 +212,7 @@ class AuthHandler:
         self._password_enabled_for_login = hs.config.auth.password_enabled_for_login
         self._password_enabled_for_reauth = hs.config.auth.password_enabled_for_reauth
         self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
-        self._third_party_rules = hs.get_third_party_event_rules()
+        self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules

         # Ratelimiter for failed auth during UIA. Uses same ratelimit config
         # as per `rc_login.failed_attempts`.
@@ -1504,8 +1504,10 @@ class AuthHandler:
         )

         # delete pushers associated with this access token
+        # XXX(quenting): This is only needed until the 'set_device_id_for_pushers'
+        # background update completes.
         if token.token_id is not None:
-            await self.hs.get_pusherpool().remove_pushers_by_access_token(
+            await self.hs.get_pusherpool().remove_pushers_by_access_tokens(
                 token.user_id, (token.token_id,)
             )

@@ -1535,7 +1537,9 @@ class AuthHandler:
         )

         # delete pushers associated with the access tokens
-        await self.hs.get_pusherpool().remove_pushers_by_access_token(
+        # XXX(quenting): This is only needed until the 'set_device_id_for_pushers'
+        # background update completes.
+        await self.hs.get_pusherpool().remove_pushers_by_access_tokens(
             user_id, (token_id for _, token_id, _ in tokens_and_devices)
         )
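
The rename from remove_pushers_by_access_token to remove_pushers_by_access_tokens reflects the new signature: an iterable of token IDs, so the single-token caller above passes a 1-tuple while the bulk caller passes a generator. A hypothetical stand-in showing just that calling convention (not Synapse's PusherPool):

import asyncio
from typing import Iterable

class PusherPoolSketch:
    # Hypothetical stand-in for Synapse's PusherPool, showing only the shape
    # of the renamed method: one iterable-of-token-ids code path serves both
    # the single-token and the bulk caller.
    async def remove_pushers_by_access_tokens(
        self, user_id: str, access_token_ids: Iterable[int]
    ) -> None:
        for token_id in access_token_ids:
            print(f"removing pushers for {user_id} / access token {token_id}")

pool = PusherPoolSketch()
# Single token: pass a 1-tuple, as the logout path above does.
asyncio.run(pool.remove_pushers_by_access_tokens("@alice:example.org", (17,)))
# Many tokens: pass a generator, as the bulk path above does.
asyncio.run(
    pool.remove_pushers_by_access_tokens("@alice:example.org", (t for t in [18, 19]))
)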
Some files were not shown because too many files have changed in this diff.