Compare commits: `shay/batch` ... `erikj/tree` (132 commits)
| Author | SHA1 | Date |
|---|---|---|
| | dcd574b89c | |
| | c03785e121 | |
| | b9cdf3d85e | |
| | 18ac015ecd | |
| | 4874d6320a | |
| | e863a99d8d | |
| | f685318c2a | |
| | 890e5f610e | |
| | acea4d7a2f | |
| | fac8a38525 | |
| | 6acb6d772a | |
| | 656dce4baf | |
| | 058789bada | |
| | d32820c7be | |
| | 6ac35667af | |
| | c61f1ef716 | |
| | 71f3e53ad0 | |
| | 781b14ec69 | |
| | 854a6884d8 | |
| | 6a41e5022e | |
| | 89ee169556 | |
| | 7aefc7e9fc | |
| | e8bce8999f | |
| | 4569eda944 | |
| | ecb6fe9d9c | |
| | c29e2c6306 | |
| | 13aa29db1d | |
| | 99d1897078 | |
| | 807f077db2 | |
| | e860316818 | |
| | 8c5b8e6d40 | |
| | 5b0dcda7f0 | |
| | c7e29ca277 | |
| | 72f3e38137 | |
| | 9ccc09fe9e | |
| | dd51828120 | |
| | 3da6450327 | |
| | 8f10c8b054 | |
| | 1183c372fa | |
| | d56f48038a | |
| | d748bbc8f8 | |
| | f792dd74e1 | |
| | 2dad42a9fb | |
| | 58383c18bd | |
| | 7a7ee3d6b8 | |
| | 105ab1c3d2 | |
| | 7d24662fdd | |
| | 09de2aecb0 | |
| | 39cde585bf | |
| | c2e06c36d4 | |
| | f6c74d1cb2 | |
| | 9af2be192a | |
| | 3b4e150868 | |
| | f38d7d79c8 | |
| | 4ae967cf63 | |
| | 7f78b383ca | |
| | df390a8e67 | |
| | 972743051b | |
| | 6d47b7e325 | |
| | 9b4cb1e2ed | |
| | 9cae44f49e | |
| | 7eb7460042 | |
| | 6d7523ef14 | |
| | 1799a54a54 | |
| | da933bfc3f | |
| | ececb2d6cb | |
| | 7c005b279e | |
| | 706b6a1ebb | |
| | a6514792b2 | |
| | 1526ff389f | |
| | 640cb3c81c | |
| | 22036f038e | |
| | 6e0cb8de79 | |
| | d988fb5e7b | |
| | 8f77418edd | |
| | 78867f302f | |
| | 8718322130 | |
| | 8d133a8464 | |
| | e1b15f25f3 | |
| | 78e23eea05 | |
| | ae22e6e94f | |
| | 01a0527892 | |
| | e7132c3f81 | |
| | 75888c2b1f | |
| | 115f0eb233 | |
| | c15e9a0edb | |
| | a84744fba0 | |
| | 7f44f3aee3 | |
| | f0d18772f3 | |
| | e6b5ca1a9f | |
| | 618e4ab81b | |
| | d8cc86eff4 | |
| | 1a8cd8bec0 | |
| | 882277008c | |
| | d63814fd73 | |
| | 945a0928c7 | |
| | f844b470f6 | |
| | 5cb6ad3b87 | |
| | 1eed795fc5 | |
| | 258b5285b6 | |
| | 63cc56affa | |
| | b5ab2c428a | |
| | 634359b083 | |
| | 64dd8a9c6e | |
| | 36097e88c4 | |
| | e226513c0f | |
| | 4d1de6a944 | |
| | 4a333d638b | |
| | 2cecb782c4 | |
| | ae54a94063 | |
| | 6816300588 | |
| | 2cc592584a | |
| | fb66fae84b | |
| | 95f7a65a56 | |
| | 683bf4af4b | |
| | 8e38d74313 | |
| | b7f5a3aaa6 | |
| | cc45808ea3 | |
| | fec1e2cb52 | |
| | 639780fc15 | |
| | 2e7c86c129 | |
| | 334a8324d3 | |
| | a3623af74e | |
| | 3a4f80f8c6 | |
| | 13ca8bb2fc | |
| | b2c2b03079 | |
| | d10a85ec9e | |
| | e9a4343cb2 | |
| | 21447c9102 | |
| | e9cbddc8e7 | |
| | 0cf48f2d5f | |
| | 22d46db0ea | |
```diff
@@ -64,7 +64,7 @@ if not IS_PR:
         {
             "python-version": "3.11",
             "database": "postgres",
-            "postgres-version": "14",
+            "postgres-version": "15",
            "extras": "all",
        }
    )
```
```diff
@@ -4,7 +4,7 @@
 root = true

 # 4 space indentation
-[*.py]
+[*.{py,pyi}]
 indent_style = space
 indent_size = 4
 max_line_length = 88
```
45 .github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored)

```diff
@@ -74,6 +74,36 @@ body:
         - Debian packages from packages.matrix.org
         - pip (from PyPI)
         - Other (please mention below)
         - I don't know
     validations:
       required: true
+  - type: input
+    id: database
+    attributes:
+      label: Database
+      description: |
+        Are you using SQLite or PostgreSQL? What's the version of your database?
+
+        If PostgreSQL, please also answer the following:
+         - are you using a single PostgreSQL server
+           or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
+         - have you previously ported from SQLite using the Synapse "portdb" script?
+         - have you previously restored from a backup?
+    validations:
+      required: true
+  - type: dropdown
+    id: workers
+    attributes:
+      label: Workers
+      description: |
+        Are you running a single Synapse process, or are you running
+        [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
+      options:
+        - Single process
+        - Multiple workers
+        - I don't know
+    validations:
+      required: true
   - type: textarea
     id: platform
     attributes:
@@ -83,17 +113,28 @@ body:
         e.g. distro, hardware, if it's running in a vm/container, etc.
     validations:
       required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Configuration
+      description: |
+        Do you have any unusual config options turned on? If so, please provide details.
+
+        - Experimental or undocumented features
+        - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
+        - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
+        - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
   - type: textarea
     id: logs
     attributes:
       label: Relevant log output
       description: |
         Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
-        This will be automatically formatted into code, so there is no need for backticks.
+        This will be automatically formatted into code, so there is no need for backticks (`\``).

         Please be careful to remove any personal or private data.

-        **Bug reports are usually very difficult to diagnose without logging.**
+        **Bug reports are usually impossible to diagnose without logging.**
       render: shell
     validations:
       required: true
```
1 .github/dependabot.yml (vendored)

```diff
@@ -18,5 +18,6 @@ updates:

   - package-ecosystem: "cargo"
     directory: "/"
+    versioning-strategy: "lockfile-only"
     schedule:
       interval: "weekly"
```
2 .github/workflows/docs-pr-netlify.yaml (vendored)

```diff
@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@46b4ae883bf0726f5949d025d31cb62c7a5ac70c # v2.24.0
+        uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
```
2 .github/workflows/docs-pr.yaml (vendored)

```diff
@@ -26,7 +26,7 @@ jobs:
           cp book/welcome_and_overview.html book/index.html

       - name: Upload Artifact
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: book
           path: book
```
8 .github/workflows/latest_deps.yml (vendored)

```diff
@@ -27,7 +27,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: stable
       - uses: Swatinem/rust-cache@v2
@@ -61,7 +61,7 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: stable
       - uses: Swatinem/rust-cache@v2
@@ -134,7 +134,7 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: stable
       - uses: Swatinem/rust-cache@v2
@@ -208,7 +208,7 @@ jobs:

     steps:
       - uses: actions/checkout@v3
-      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+      - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
```
74 .github/workflows/push_complement_image.yml (vendored, new file)

```yaml
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead

name: Store complement-synapse image in ghcr.io
on:
  push:
    branches: [ "master" ]
  schedule:
    - cron: '0 5 * * *'
  workflow_dispatch:
    inputs:
      branch:
        required: true
        default: 'develop'
        type: choice
        options:
          - develop
          - master

# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build and push complement image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout specific branch (debug build)
        uses: actions/checkout@v3
        if: github.event_name == 'workflow_dispatch'
        with:
          ref: ${{ inputs.branch }}
      - name: Checkout clean copy of develop (scheduled build)
        uses: actions/checkout@v3
        if: github.event_name == 'schedule'
        with:
          ref: develop
      - name: Checkout clean copy of master (on-push)
        uses: actions/checkout@v3
        if: github.event_name == 'push'
        with:
          ref: master
      - name: Login to registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Work out labels for complement image
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}/complement-synapse
          tags: |
            type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
            type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
            type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
            type=sha,format=long
      - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
        run: scripts-dev/complement.sh --build-only
      - name: Tag and push generated image
        run: |
          for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
            echo "tag and push $TAG"
            docker tag complement-synapse $TAG
            docker push $TAG
          done
```
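The `workflow_dispatch` trigger above takes a required `branch` input. A sketch of how a manual build could be kicked off with the GitHub CLI (assuming `gh` is installed and authenticated; the branch value is illustrative):

```sh
# Manually trigger the image build for a chosen branch.
gh workflow run push_complement_image.yml -f branch=develop

# Check the run that was just queued (most recent run of this workflow).
gh run list --workflow=push_complement_image.yml --limit 1
```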
50 .github/workflows/tests.yml (vendored)

```diff
@@ -27,6 +27,7 @@ jobs:
           rust:
             - 'rust/**'
             - 'Cargo.toml'
+            - 'Cargo.lock'

   check-sampleconfig:
     runs-on: ubuntu-latest
@@ -102,13 +103,35 @@ jobs:
         # There don't seem to be versioned releases of this action per se: for each rust
         # version there is a branch which gets constantly rebased on top of master.
         # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
           components: clippy
       - uses: Swatinem/rust-cache@v2

-      - run: cargo clippy
+      - run: cargo clippy -- -D warnings
+
+  # We also lint against a nightly rustc so that we can lint the benchmark
+  # suite, which requires a nightly compiler.
+  lint-clippy-nightly:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.rust == 'true' }}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Install Rust
+        # There don't seem to be versioned releases of this action per se: for each rust
+        # version there is a branch which gets constantly rebased on top of master.
+        # We pin to a specific commit for paranoia's sake.
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+        with:
+          toolchain: nightly-2022-12-01
+          components: clippy
+      - uses: Swatinem/rust-cache@v2
+
+      - run: cargo clippy --all-features -- -D warnings

   lint-rustfmt:
     runs-on: ubuntu-latest
@@ -122,7 +145,7 @@ jobs:
         # There don't seem to be versioned releases of this action per se: for each rust
         # version there is a branch which gets constantly rebased on top of master.
         # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
           components: rustfmt
@@ -184,7 +207,7 @@ jobs:
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
       - uses: Swatinem/rust-cache@v2
@@ -228,7 +251,7 @@ jobs:
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
       - uses: Swatinem/rust-cache@v2
@@ -346,7 +369,7 @@ jobs:
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
       - uses: Swatinem/rust-cache@v2
@@ -412,7 +435,7 @@ jobs:
             postgres-version: "11"

           - python-version: "3.11"
-            postgres-version: "14"
+            postgres-version: "15"

     services:
       postgres:
@@ -430,6 +453,15 @@ jobs:

     steps:
       - uses: actions/checkout@v3
+      - name: Add PostgreSQL apt repository
+        # We need a version of pg_dump that can handle the version of
+        # PostgreSQL being tested against. The Ubuntu package repository lags
+        # behind new releases, so we have to use the PostreSQL apt repository.
+        # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+        run: |
+          sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+          wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+          sudo apt-get update
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -480,7 +512,7 @@ jobs:
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
       - uses: Swatinem/rust-cache@v2
@@ -508,7 +540,7 @@ jobs:
        # There don't seem to be versioned releases of this action per se: for each rust
        # version there is a branch which gets constantly rebased on top of master.
        # We pin to a specific commit for paranoia's sake.
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: 1.58.1
       - uses: Swatinem/rust-cache@v2
```
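The recurring `dtolnay/rust-toolchain` swaps above replace one pinned commit with another; as the inline comments note, that action has no versioned releases, so the workflows pin a specific commit for paranoia's sake. A hedged sketch of how such a pin could be checked against the upstream repository before adopting it (plain git, nothing Synapse-specific):

```sh
# See which upstream refs (branches/tags) currently point at the new pin.
git ls-remote https://github.com/dtolnay/rust-toolchain \
  | grep e645b0cf01249a964ec099494d38d2da0f0b349f

# Or fetch the repository and inspect the commit itself.
git clone --bare https://github.com/dtolnay/rust-toolchain /tmp/rust-toolchain.git
git --git-dir=/tmp/rust-toolchain.git show --no-patch e645b0cf01249a964ec099494d38d2da0f0b349f
```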
8 .github/workflows/twisted_trunk.yml (vendored)

```diff
@@ -18,7 +18,7 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: stable
       - uses: Swatinem/rust-cache@v2
@@ -43,7 +43,7 @@ jobs:
       - run: sudo apt-get -qq install xmlsec1

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: stable
       - uses: Swatinem/rust-cache@v2
@@ -82,7 +82,7 @@ jobs:
       - uses: actions/checkout@v3

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
+        uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
         with:
           toolchain: stable
       - uses: Swatinem/rust-cache@v2
@@ -174,7 +174,7 @@ jobs:

     steps:
       - uses: actions/checkout@v3
-      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+      - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
```
155 CHANGES.md

```diff
@@ -1,3 +1,158 @@
+Synapse 1.73.0rc2 (2022-12-01)
+==============================
+
+Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
+
+
+Synapse 1.73.0rc1 (2022-11-29)
+==============================
+
+Features
+--------
+
+- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
+- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
+- Adds support for handling avatar in SSO login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
+- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
+- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
+- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
+- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
+- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
+- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
+- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
+- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
+- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
+
+
+Improved Documentation
+----------------------
+
+- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
+
+
+Internal Changes
+----------------
+
+- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
+- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
+- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
+- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496))
+([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
+- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
+- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
+- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
+- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
+- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
+- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
+- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
+- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
+- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
+- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
+- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
+- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
+- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
+- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
+
+
 Synapse 1.72.0 (2022-11-22)
 ===========================

 Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).

 Bugfixes
 --------

 - Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))


 Synapse 1.72.0rc1 (2022-11-16)
 ==============================

 Features
 --------

 - Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
 - Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
 - Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
 - Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))


 Bugfixes
 --------

 - Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
 - Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
 - Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
 - Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
 - Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
 - Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
 - Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
 - Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
 - Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
 - Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))


 Updates to the Docker image
 ---------------------------

 - Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
 - Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))


 Improved Documentation
 ----------------------

 - Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
 - Add addtional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
 - Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
 - Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))


 Deprecations and Removals
 -------------------------

 - Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))


 Internal Changes
 ----------------

 - Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
 - Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
 - Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
 - Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
 - Enabling testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
 - Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
 - Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
 - Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
 - Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
 - Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
 - Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
 - Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
 - Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
 - Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
 - Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
 - Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))


 Synapse 1.71.0 (2022-11-08)
 ===========================
```
20 Cargo.lock (generated)

```diff
@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

 [[package]]
 name = "blake2"
-version = "0.10.4"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
 dependencies = [
  "digest",
 ]
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

 [[package]]
 name = "serde"
-version = "1.0.147"
+version = "1.0.148"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
+checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.147"
+version = "1.0.148"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
+checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -343,9 +343,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.87"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
+checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
 dependencies = [
  "itoa",
  "ryu",
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"

 [[package]]
 name = "syn"
-version = "1.0.102"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
+checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
 dependencies = [
  "proc-macro2",
  "quote",
```
```diff
@@ -3,3 +3,7 @@

 [workspace]
 members = ["rust"]
+
+[profile.dbgrelease]
+inherits = "release"
+debug = true
```
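The added profile inherits the release settings while keeping debug symbols. For reference, custom Cargo profiles are selected with the standard `--profile` flag; a minimal usage sketch (the profile name comes from the hunk above):

```sh
# Build with release optimisations plus debug info.
cargo build --profile dbgrelease

# Artifacts land under target/<profile-name>/ instead of target/release/.
ls target/dbgrelease/
```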
```diff
@@ -1 +0,0 @@
-Upload documentation PRs to Netlify.
@@ -1 +0,0 @@
-Run unit tests against Python 3.11.
@@ -1 +0,0 @@
-Add all Stream Writer worker types to configure_workers_and_start.py.
--- /dev/null
+++ b/changelog.d/14255.misc
@@ -0,0 +1 @@
+Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar).
@@ -1 +0,0 @@
-Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions.
@@ -1 +0,0 @@
-Faster joins: do not block creation of or queries for room aliases during the resync.
@@ -1 +0,0 @@
-Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de).
@@ -1 +0,0 @@
-Switch to a maintained action for installing Rust in CI.
@@ -1 +0,0 @@
-Add override ability to `complement.sh` command line script to request certain types of workers.
@@ -1 +0,0 @@
-Bump flake8-bugbear from 22.9.23 to 22.10.27.
@@ -1 +0,0 @@
-Enabling testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement.
@@ -1 +0,0 @@
-Bump twisted from 22.8.0 to 22.10.0.
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers.
@@ -1 +0,0 @@
-Use a maintained Github action to install Rust.
@@ -1 +0,0 @@
-Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation.
@@ -1 +0,0 @@
-Fix refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper.
@@ -1 +0,0 @@
-Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility.
@@ -1 +0,0 @@
-Upload documentation PRs to Netlify.
@@ -1 +0,0 @@
-Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance.
@@ -1 +0,0 @@
-Bump dawidd6/action-download-artifact from 2.15.0 to 2.24.0.
@@ -1 +0,0 @@
-Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0.
@@ -1 +0,0 @@
-Bump regex from 1.6.0 to 1.7.0.
@@ -1 +0,0 @@
-Bump pyo3 from 0.17.2 to 0.17.3.
@@ -1 +0,0 @@
-Bump types-setuptools from 65.5.0.1 to 65.5.0.2.
@@ -1 +0,0 @@
-Bump pillow from 9.2.0 to 9.3.0.
@@ -1 +0,0 @@
-Bump cryptography from 36.0.1 to 38.0.3.
@@ -1 +0,0 @@
-Bump types-pyyaml from 6.0.12 to 6.0.12.1.
@@ -1 +0,0 @@
-Bump types-jsonschema from 4.4.6 to 4.17.0.0.
@@ -1 +0,0 @@
-Remove support for PostgreSQL 10.
```
```diff
--- /dev/null
+++ b/changelog.d/14490.feature
@@ -0,0 +1 @@
+Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`.
--- /dev/null
+++ b/changelog.d/14493.doc
@@ -0,0 +1 @@
+Update worker settings for `pusher` and `federation_sender` functionality.
--- /dev/null
+++ b/changelog.d/14517.doc
@@ -0,0 +1 @@
+Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages.
--- /dev/null
+++ b/changelog.d/14525.feature
@@ -0,0 +1 @@
+Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`.
--- /dev/null
+++ b/changelog.d/14528.misc
@@ -0,0 +1 @@
+Share the `ClientRestResource` for both workers and the main process.
--- /dev/null
+++ b/changelog.d/14549.misc
@@ -0,0 +1 @@
+Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room.
--- /dev/null
+++ b/changelog.d/14551.feature
@@ -0,0 +1 @@
+Add new `push.enabled` config option to allow opting out of push notification calculation.
--- /dev/null
+++ b/changelog.d/14568.misc
@@ -0,0 +1 @@
+Modernize unit tests configuration related to workers.
--- /dev/null
+++ b/changelog.d/14576.feature
@@ -0,0 +1 @@
+Advertise support for Matrix 1.5 on `/_matrix/client/versions`.
--- /dev/null
+++ b/changelog.d/14591.misc
@@ -0,0 +1 @@
+Bump jsonschema from 4.17.0 to 4.17.3.
--- /dev/null
+++ b/changelog.d/14592.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances.
--- /dev/null
+++ b/changelog.d/14597.misc
@@ -0,0 +1 @@
+Add missing type hints.
--- /dev/null
+++ b/changelog.d/14602.misc
@@ -0,0 +1 @@
+Fix Rust lint CI.
--- /dev/null
+++ b/changelog.d/14607.misc
@@ -0,0 +1 @@
+Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1.
```
(File diff suppressed because it is too large.)
24 debian/changelog (vendored)

```diff
@@ -1,3 +1,27 @@
+matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.73.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 01 Dec 2022 10:02:19 +0000
+
+matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.73.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Nov 2022 12:28:13 +0000
+
+matrix-synapse-py3 (1.72.0) stable; urgency=medium
+
+  * New Synapse release 1.72.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Nov 2022 10:57:30 +0000
+
+matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.72.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000
+
 matrix-synapse-py3 (1.71.0) stable; urgency=medium

   * New Synapse release 1.71.0.
```
```diff
@@ -100,8 +100,6 @@ experimental_features:
   # client-side support for partial state in /send_join responses
   faster_joins: true
 {% endif %}
-  # Enable jump to date endpoint
-  msc3030_enabled: true
   # Filtering /messages by relation type.
   msc3874_enabled: true
```
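This removal lines up with the CHANGES.md entry above moving the MSC3030 `/timestamp_to_event` endpoints to their stable `v1` location, so the experimental flag is no longer needed. A hedged sketch of calling the stable endpoint (homeserver, room ID, access token, and timestamp are placeholders):

```sh
# Find the closest event at or after the given timestamp (dir=f searches forwards).
# Placeholder values throughout; the room ID is URL-encoded ("!" -> "%21").
curl -s -H "Authorization: Bearer <access_token>" \
  "https://matrix.example.com/_matrix/client/v1/rooms/%21someroom%3Aexample.com/timestamp_to_event?ts=1669900000000&dir=f"
```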
```diff
@@ -20,7 +20,7 @@
 # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
 # * SYNAPSE_REPORT_STATS: Whether to report stats.
 # * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-#   below. Leave empty for no workers, or set to '*' for all possible workers.
+#   below. Leave empty for no workers.
 # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
 #   will be treated as Application Service registration files.
 # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -58,10 +58,10 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 #   have to attach by instance_map to the master process and have client endpoints.
 WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     "pusher": {
-        "app": "synapse.app.pusher",
+        "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"start_pushers": False},
+        "shared_extra_conf": {},
         "worker_extra_conf": "",
     },
     "user_dir": {
@@ -84,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
         ],
-        "shared_extra_conf": {"enable_media_repo": False},
+        # The first configured media worker will run the media background jobs
+        "shared_extra_conf": {
+            "enable_media_repo": False,
+            "media_instance_running_background_jobs": "media_repository1",
+        },
         "worker_extra_conf": "enable_media_repo: true",
     },
     "appservice": {
@@ -95,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "",
     },
     "federation_sender": {
-        "app": "synapse.app.federation_sender",
+        "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"send_federation": False},
+        "shared_extra_conf": {},
         "worker_extra_conf": "",
     },
     "synchrotron": {
@@ -136,6 +140,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
             "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+            "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
         ],
         "shared_extra_conf": {},
@@ -159,6 +164,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_matrix/federation/(v1|v2)/invite/",
             "^/_matrix/federation/(v1|v2)/query_auth/",
             "^/_matrix/federation/(v1|v2)/event_auth/",
+            "^/_matrix/federation/v1/timestamp_to_event/",
             "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
             "^/_matrix/federation/(v1|v2)/user/devices/",
             "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -205,14 +211,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "",
     },
     "frontend_proxy": {
-        "app": "synapse.app.frontend_proxy",
+        "app": "synapse.app.generic_worker",
         "listener_resources": ["client", "replication"],
         "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
         "shared_extra_conf": {},
-        "worker_extra_conf": (
-            "worker_main_http_uri: http://127.0.0.1:%d"
-            % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
-        ),
+        "worker_extra_conf": "",
     },
     "account_data": {
         "app": "synapse.app.generic_worker",
@@ -326,7 +329,7 @@ def add_worker_roles_to_shared_config(
     worker_port: int,
 ) -> None:
     """Given a dictionary representing a config file shared across all workers,
-    append sharded worker information to it for the current worker_type instance.
+    append appropriate worker information to it for the current worker_type instance.

     Args:
         shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -359,7 +362,7 @@ def add_worker_roles_to_shared_config(

     elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
         # Update the list of stream writers
-        # It's convienent that the name of the worker type is the same as the event stream
+        # It's convenient that the name of the worker type is the same as the stream to write
         shared_config.setdefault("stream_writers", {}).setdefault(
             worker_type, []
         ).append(worker_name)
@@ -371,10 +374,6 @@ def add_worker_roles_to_shared_config(
             "port": worker_port,
         }

-    elif worker_type == "media_repository":
-        # The first configured media worker will run the media background jobs
-        shared_config.setdefault("media_instance_running_background_jobs", worker_name)
-

 def generate_base_homeserver_config() -> None:
     """Starts Synapse and generates a basic homeserver config, which will later be
@@ -483,8 +482,7 @@ def generate_worker_files(
         if worker_config:
             worker_config = worker_config.copy()
         else:
-            log(worker_type + " is an unknown worker type! It will be ignored")
-            continue
+            error(worker_type + " is an unknown worker type! Please fix!")

         new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
         worker_type_counter[worker_type] = new_worker_count
```
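Several worker types above now run as `synapse.app.generic_worker`, and `SYNAPSE_WORKER_TYPES` (described in the first hunk) is the knob that selects them when the workers image starts. A hedged sketch of a container start exercising two of the reworked types (the image name, volume path, and server name are illustrative assumptions):

```sh
# Start a multi-worker Synapse container with a pusher and a federation sender.
# All values here are placeholders for illustration.
docker run -d --name synapse \
  -v /path/to/synapse/data:/data \
  -e SYNAPSE_SERVER_NAME=example.com \
  -e SYNAPSE_REPORT_STATS=no \
  -e SYNAPSE_WORKER_TYPES="pusher,federation_sender" \
  matrixdotorg/synapse-workers:latest
```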
```diff
@@ -9,6 +9,8 @@
 - [Configuring a Reverse Proxy](reverse_proxy.md)
 - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
 - [Configuring a Turn Server](turn-howto.md)
+  - [coturn TURN server](setup/turn/coturn.md)
+  - [eturnal TURN server](setup/turn/eturnal.md)
 - [Delegation](delegate.md)

 # Upgrading
```
````diff
@@ -1197,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like
 ```

 _Added in Synapse 1.68.0._
+
+
+### Find a user based on their Third Party ID (ThreePID or 3PID)
+
+The API is:
+
+```
+GET /_synapse/admin/v1/threepid/$medium/users/$address
+```
+
+When a user matched the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
+
+```json
+{
+    "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `medium` - Kind of third-party ID, either `email` or `msisdn`.
+- `address` - Value of the third-party ID.
+
+The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.
+
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+    "errcode":"M_NOT_FOUND",
+    "error":"User not found"
+}
+```
+
+_Added in Synapse 1.72.0._
````
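A hedged usage sketch of the lookup endpoint documented above (server name and admin access token are placeholders; note the URL-encoding of characters like `@` in the address):

```sh
# Look up the Matrix user bound to an email address (requires an admin token).
curl -s -H "Authorization: Bearer <admin_access_token>" \
  "https://matrix.example.com/_synapse/admin/v1/threepid/email/users/alice%40example.org"
```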
````diff
@@ -79,6 +79,9 @@ server {
         # Nginx by default only allows file uploads up to 1M in size
         # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
         client_max_body_size 50M;
+
+        # Synapse responses may be chunked, which is an HTTP/1.1 feature.
+        proxy_http_version 1.1;
     }
 }
 ```
````
````diff
@@ -84,7 +84,9 @@ file when you upgrade the Debian package to a later version.

 ##### Downstream Debian packages

-Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
+Andrej Shadura maintains a
+[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
+the Debian repositories.
 For `bookworm` and `sid`, it can be installed simply with:

 ```sh
@@ -100,23 +102,27 @@ for information on how to use backports.
 ##### Downstream Ubuntu packages

 We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
+at this time, as they are [old and suffer from known security vulnerabilities](
+https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
+).
 The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

 #### Fedora

-Synapse is in the Fedora repositories as `matrix-synapse`:
+Synapse is in the Fedora repositories as
+[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):

 ```sh
 sudo dnf install matrix-synapse
 ```

-Oleg Girko provides Fedora RPMs at
+Additionally, Oleg Girko provides Fedora RPMs at
 <https://obs.infoserver.lv/project/monitor/matrix-synapse>

 #### OpenSUSE

-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+Synapse is in the OpenSUSE repositories as
+[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):

 ```sh
 sudo zypper install matrix-synapse
@@ -151,7 +157,8 @@ sudo pip install py-bcrypt

 #### Void Linux

-Synapse can be found in the void repositories as 'synapse':
+Synapse can be found in the void repositories as
+['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):

 ```sh
 xbps-install -Su
````
188
docs/setup/turn/coturn.md
Normal file
188
docs/setup/turn/coturn.md
Normal file
@@ -0,0 +1,188 @@
|
||||
# coturn TURN server

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API).

## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian and Ubuntu based distributions

Just install the debian package:

```sh
sudo apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   sudo make install
   ```

### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure `coturn` to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, `coturn` can be configured to write to a
   logfile - check the example config file supplied with `coturn`.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP relay).

1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure `coturn` to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `coturn` to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     sudo systemctl restart coturn
     ```

   * If you built from source:

     ```sh
     /usr/local/bin/turnserver -o
     ```
170 docs/setup/turn/eturnal.md Normal file
@@ -0,0 +1,170 @@
# eturnal TURN server

The following sections describe how to install [eturnal](<https://github.com/processone/eturnal>)
(which implements the TURN REST API).

## `eturnal` setup

### Initial installation

The `eturnal` TURN server implementation is available from a variety of sources
such as native package managers, binary packages, installation from source or
[container image](https://eturnal.net/documentation/code/docker.html). They are
all described [here](https://github.com/processone/eturnal#installation).

Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
are available as well.

### Configuration

After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
here: `/etc/eturnal.yml` (and, if not found there, there is a backup file here:
`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
format. The file contains further explanations.

Here are some hints on how to configure eturnal on your [host machine](https://github.com/processone/eturnal#configuration)
or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
You may also dive deeper into the [reference documentation](https://eturnal.net/documentation/).

`eturnal` runs out of the box with the default configuration. To enable TURN and
to integrate it with your homeserver, some aspects in `eturnal`'s default configuration file
must be edited:

1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
   and eturnal's shared `secret` for authentication

   Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
   configuration file:

   ```yaml
   secret: "long-and-cryptic"     # Shared secret, CHANGE THIS.
   ```

   One way to generate a `secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

1. Public IP address

   If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
   however, it may also be configured by uncommenting and adjusting this line, so
   `eturnal` advertises that address to connecting clients:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure `eturnal` to advertise each available address:

   ```yaml
   relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
   relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. Logging

   If `eturnal` was started by systemd, log files are written into the
   `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
   instead, the `log_dir` option can be set to `stdout` in the configuration file.
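
   For instance, a minimal sketch of that setting (assuming a systemd host, so
   stdout ends up in the journal):

   ```yaml
   ## Send logs to the journal instead of /var/log/eturnal:
   log_dir: stdout
   ```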

1. Security considerations

   Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):

   ```yaml
   ## Reject TURN relaying from/to the following addresses/networks:
   blacklist:                 # This is the default blacklist.
       - "127.0.0.0/8"        # IPv4 loopback.
       - "::1"                # IPv6 loopback.
       - recommended          # Expands to a number of networks recommended to be
                              # blocked, but includes private networks. Those
                              # would have to be 'whitelist'ed if eturnal serves
                              # local clients/peers within such networks.
   ```

   To whitelist IP addresses or specific (private) networks, you need to **add** a
   whitelist part into the configuration file, e.g.:

   ```yaml
   whitelist:
       - "192.168.0.0/16"
       - "203.0.113.113"
       - "2001:db8::/64"
   ```

   The more specific, the better.

1. TURNS (TURN via TLS/DTLS)

   Also consider supporting TLS/DTLS. To do this, adjust the following settings
   in the `eturnal.yml` configuration file (the TLS parts should no longer be commented out):

   ```yaml
   listen:
       - ip: "::"
         port: 3478
         transport: udp
       - ip: "::"
         port: 3478
         transport: tcp
       - ip: "::"
         port: 5349
         transport: tls

   ## TLS certificate/key files (must be readable by 'eturnal' user!):
   tls_crt_file: /etc/eturnal/tls/crt.pem
   tls_key_file: /etc/eturnal/tls/key.pem
   ```

   In this case, replace the `turn:` schemes in the homeserver's `turn_uris` settings
   with `turns:`. More is described [here](../../usage/configuration/config_documentation.md#turn_uris).

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Firewall

   Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (by default 3478 and 5349 for TURN
   traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP relay).

1. Reloading/restarting `eturnal`

   Changes in the configuration file require `eturnal` to reload or restart. This
   can be achieved by:

   ```sh
   eturnalctl reload
   ```

   `eturnal` performs a configuration check before actually reloading or restarting,
   and provides hints if something is not configured correctly.

### eturnalctl operations script

`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
which can be called e.g. to check whether the service is up, to restart the service,
to query how many active sessions exist, to change logging behaviour, and so on, as
sketched below.

Hint: If `eturnalctl` is not part of your `$PATH`, consider either symlinking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly: e.g. `/opt/eturnal/bin/eturnalctl info`
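
A short sketch of typical usage, drawing only on the subcommands mentioned in this
guide:

```sh
eturnalctl info             # check that the service is up; shows session counts
eturnalctl reload           # re-read eturnal.yml after configuration changes
eturnalctl loglevel debug   # temporarily raise logging verbosity
eturnalctl credentials      # generate a test username/password pair
```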

@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.

The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.
This documentation provides two TURN server configuration examples:

* [coturn](setup/turn/coturn.md)
* [eturnal](setup/turn/eturnal.md)
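
To illustrate the shared-secret mechanism mentioned above: under the TURN REST API,
the ephemeral username encodes an expiry timestamp, and the password is the
base64-encoded HMAC-SHA1 of that username keyed with the shared secret. A rough
sketch with `openssl` (the secret and user name are placeholders):

```sh
secret="your-shared-secret"          # must match the TURN server's configured secret
expiry=$(( $(date +%s) + 86400 ))    # credentials valid for one day
username="${expiry}:someuser"        # TURN REST API username format
password=$(echo -n "$username" | openssl dgst -sha1 -hmac "$secret" -binary | base64)
echo "username: $username"
echo "password: $password"
```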

## Requirements

For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.

Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and to often not work.

## `coturn` setup

### Initial installation

The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.

#### Debian installation

Just install the debian package:

```sh
apt install coturn
```

This will install and start a systemd service called `coturn`.

#### Source installation

1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.

1. Configure it:

   ```sh
   ./configure
   ```

   You may need to install `libevent2`: if so, you should do so in
   the way recommended by your operating system. You can ignore
   warnings about lack of database support: a database is unnecessary
   for this purpose.

1. Build and install it:

   ```sh
   make
   make install
   ```

### Configuration

1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
   lines, with example values, are:

   ```
   use-auth-secret
   static-auth-secret=[your secret key here]
   realm=turn.myserver.org
   ```

   See `turnserver.conf` for explanations of the options. One way to generate
   the `static-auth-secret` is with `pwgen`:

   ```sh
   pwgen -s 64 1
   ```

   A `realm` must be specified, but its value is somewhat arbitrary. (It is
   sent to clients as part of the authentication flow.) It is conventional to
   set it to be your server name.

1. You will most likely want to configure coturn to write logs somewhere. The
   easiest way is normally to send them to the syslog:

   ```sh
   syslog
   ```

   (in which case, the logs will be available via `journalctl -u coturn` on a
   systemd system). Alternatively, coturn can be configured to write to a
   logfile - check the example config file supplied with coturn.

1. Consider your security settings. TURN lets users request a relay which will
   connect to arbitrary IP addresses and ports. The following configuration is
   suggested as a minimum starting point:

   ```
   # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
   no-tcp-relay

   # don't let the relay ever try to connect to private IP address ranges within your network (if any)
   # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
   denied-peer-ip=10.0.0.0-10.255.255.255
   denied-peer-ip=192.168.0.0-192.168.255.255
   denied-peer-ip=172.16.0.0-172.31.255.255

   # recommended additional local peers to block, to mitigate external access to internal services.
   # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
   no-multicast-peers
   denied-peer-ip=0.0.0.0-0.255.255.255
   denied-peer-ip=100.64.0.0-100.127.255.255
   denied-peer-ip=127.0.0.0-127.255.255.255
   denied-peer-ip=169.254.0.0-169.254.255.255
   denied-peer-ip=192.0.0.0-192.0.0.255
   denied-peer-ip=192.0.2.0-192.0.2.255
   denied-peer-ip=192.88.99.0-192.88.99.255
   denied-peer-ip=198.18.0.0-198.19.255.255
   denied-peer-ip=198.51.100.0-198.51.100.255
   denied-peer-ip=203.0.113.0-203.0.113.255
   denied-peer-ip=240.0.0.0-255.255.255.255

   # special case the turn server itself so that client->TURN->TURN->client flows work
   # this should be one of the turn server's listening IPs
   allowed-peer-ip=10.0.0.1

   # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
   user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
   total-quota=1200
   ```

1. Also consider supporting TLS/DTLS. To do this, add the following settings
   to `turnserver.conf`:

   ```
   # TLS certificates, including intermediate certs.
   # For Let's Encrypt certificates, use `fullchain.pem` here.
   cert=/path/to/fullchain.pem

   # TLS private key file
   pkey=/path/to/privkey.pem

   # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
   #no-tls
   #no-dtls
   ```

   In this case, replace the `turn:` schemes in the `turn_uris` settings below
   with `turns:`.

   We recommend that you only try to set up TLS/DTLS once you have set up a
   basic installation and got it working.

   NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
   not work with any Matrix client that uses Chromium's WebRTC library. This
   currently includes Element Android & iOS; for more details, see their
   [respective](https://github.com/vector-im/element-android/issues/1533)
   [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
   [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
   Consider using a ZeroSSL certificate for your TURN server as a working alternative.

1. Ensure your firewall allows traffic into the TURN server on the ports
   you've configured it to listen on (By default: 3478 and 5349 for TURN
   traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
   for the UDP relay.)

1. If your TURN server is behind NAT, the NAT gateway must have an external,
   publicly-reachable IP address. You must configure coturn to advertise that
   address to connecting clients:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   ```

   You may optionally limit the TURN server to listen only on the local
   address that is mapped by NAT to the external address:

   ```
   listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
   ```

   If your NAT gateway is reachable over both IPv4 and IPv6, you may
   configure coturn to advertise each available address:

   ```
   external-ip=EXTERNAL_NAT_IPv4_ADDRESS
   external-ip=EXTERNAL_NAT_IPv6_ADDRESS
   ```

   When advertising an external IPv6 address, ensure that the firewall and
   network settings of the system running your TURN server are configured to
   accept IPv6 traffic, and that the TURN server is listening on the local
   IPv6 address that is mapped by NAT to the external IPv6 address.

1. (Re)start the turn server:

   * If you used the Debian package (or have set up a systemd unit yourself):

     ```sh
     systemctl restart coturn
     ```

   * If you installed from source:

     ```sh
     bin/turnserver -o
     ```
Afterwards, the homeserver needs some further configuration.

## Synapse setup

Your homeserver configuration file needs the following extra keys:

1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
for your TURN server to be given out to your clients. Add separate
entries for each transport your TURN server supports.
2. "`turn_shared_secret`": This is the secret shared between your
homeserver and your TURN server, so you should set it to the same
string you used in turnserver.conf.
3. "`turn_user_lifetime`": This is the amount of time credentials
generated by your homeserver are valid for (in milliseconds).
Shorter times offer less potential for abuse at the expense of
increased traffic between web clients and your homeserver to
refresh credentials. The TURN REST API specification recommends
one day (86400000).
4. "`turn_allow_guests`": Whether to allow guest users to use the
TURN server. This is enabled by default, as otherwise VoIP will
not work reliably for guests. However, it does introduce a
security risk as it lets guests connect to arbitrary endpoints
without having gone through a CAPTCHA or similar to register a
real account.
1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)

As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
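
A sketch of that section (the shared secret shown here is a placeholder):

```yaml
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret"
turn_user_lifetime: 86400000
turn_allow_guests: true
```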

@@ -263,7 +69,7 @@ Here are a few things to try:

* Check that you have opened your firewall to allow UDP traffic to the UDP
  relay ports (49152-65535 by default).

* Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
* Try disabling TLS/DTLS listeners and enable only the (unencrypted)
  TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
  voice & video WebRTC traffic is always encrypted.)

@@ -288,12 +94,19 @@ Here are a few things to try:

* ensure that your TURN server uses the NAT gateway as its default route.

* Enable more verbose logging in coturn via the `verbose` setting:
* Enable more verbose logging. In `coturn`, use the `verbose` setting:

  ```
  verbose
  ```

  or, with `eturnal`, use the shell command `eturnalctl loglevel debug` or set it in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for it to become effective):

  ```yaml
  ## Logging configuration:
  log_level: debug
  ```

  ... and then see if there are any clues in its logs.

* If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
  matrix client to your homeserver in your browser's network inspector. In
  the response you should see `username` and `password`. Or:

  * Use the following shell commands:
  * Use the following shell commands for `coturn`:

    ```sh
    secret=staticAuthSecretHere
@@ -327,11 +140,16 @@ Here are a few things to try:
    echo -e "username: $u\npassword: $p"
    ```

    Or:
    or for `eturnal`:

* Temporarily configure coturn to accept a static username/password. To do
this, comment out `use-auth-secret` and `static-auth-secret` and add the
following:
    ```sh
    eturnalctl credentials
    ```

* Or (**coturn only**): Temporarily configure `coturn` to accept a static
  username/password. To do this, comment out `use-auth-secret` and
  `static-auth-secret` and add the following:

  ```
  lt-cred-mech
  ```
@@ -88,6 +88,28 @@ process, for example:

    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

# Upgrading to v1.73.0

## Legacy Prometheus metric names have now been removed

Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
and offered an option to disable them.
Synapse v1.71.0 disabled legacy Prometheus metric names by default.

This version, v1.73.0, removes those legacy Prometheus metric names entirely.
This also means that the `enable_legacy_metrics` configuration option has been
removed; it will no longer be possible to re-enable the legacy metric names.

If you use metrics and have not yet updated your Grafana dashboard(s),
Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
to this version.
Note that the included Grafana dashboard was updated in v1.72.0 to correct some
metric names which were missed when legacy metrics were disabled by default.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
for more context.
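
As an illustration, applying the renaming scheme described in those notes (colons
replaced, counters gaining a `_total` suffix), dashboards would need renames along
these lines:

```
synapse_util_caches_response_cache:hits  ->  synapse_util_caches_response_cache_hits
synapse_federation_client_sent_edus     ->  synapse_federation_client_sent_edus_total
```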

# Upgrading to v1.72.0

## Dropping support for PostgreSQL 10

@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.

## Making an Admin API request
For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.
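
For example, assuming Synapse's client listener is on `localhost:8008`, querying the
server version endpoint from that terminal might look like:

```sh
# <access_token> is the admin user's access token found above
curl --header "Authorization: Bearer <access_token>" \
    http://localhost:8008/_synapse/admin/v1/server_version
```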

@@ -858,7 +858,7 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.

The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history but in general caution is advised when enabling this
@@ -2437,31 +2437,6 @@ Example configuration:
enable_metrics: true
```
---
### `enable_legacy_metrics`

Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `false`. Has no effect if `enable_metrics` is `false`.
**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**

Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.

These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.

Example configuration:
```yaml
enable_legacy_metrics: false
```

See https://github.com/matrix-org/synapse/issues/11106 for context.

*Since v1.67.0.*

**Will be removed in v1.73.0.**
---
### `sentry`

Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2993,10 +2968,17 @@ Options for each entry include:

For the default provider, the following settings are available:

* subject_claim: name of the claim containing a unique identifier
* `subject_claim`: name of the claim containing a unique identifier
  for the user. Defaults to 'sub', which OpenID Connect
  compliant providers should provide.

* `picture_claim`: name of the claim containing a URL for the user's profile picture.
  Defaults to 'picture', which OpenID Connect compliant providers should provide
  and which has to refer to a direct image file such as a PNG, JPEG, or GIF image file.

  Currently only supported in monolithic (single-process) server configurations
  where the media repository runs within the Synapse process.

* `localpart_template`: Jinja2 template for the localpart of the MXID.
  If this is not set, the user will be prompted to choose their
  own username (see the documentation for the `sso_auth_account_details.html`
@@ -3021,7 +3003,7 @@ Options for each entry include:
  which is set to the claims returned by the UserInfo Endpoint and/or
  in the ID Token.

* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
  Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
  Defaults to `false`.
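
Putting the claim-related options together, a hypothetical provider entry might
configure the default mapping provider like this (the `idp_id`, issuer, and
template values are illustrative, not prescriptive):

```yaml
oidc_providers:
  - idp_id: my_idp                       # illustrative provider id
    idp_name: "My OpenID provider"
    issuer: "https://idp.example.com/"
    client_id: "synapse"
    client_secret: "copy-from-your-idp"
    user_mapping_provider:
      config:
        subject_claim: "sub"             # unique identifier claim
        picture_claim: "picture"         # avatar URL claim
        localpart_template: "{{ user.preferred_username }}"
```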

@@ -3373,6 +3355,10 @@ Configuration settings related to push notifications
This setting defines options for push notifications.

This option has a number of sub-options. They are as follows:
* `enable_push`: Enables or disables push notification calculation. Note that disabling this will also
  stop unread counts being calculated for rooms. This mode of operation is intended
  for homeservers which may only have bots or appservice users connected, or are otherwise
  not interested in push/unread counters. This is enabled by default.
* `include_content`: Clients requesting push notifications can either have the body of
  the message sent in the notification poke along with other details
  like the sender, or just the event ID and room ID (`event_id_only`).
@@ -3393,6 +3379,7 @@ This option has a number of sub-options. They are as follows:
Example configuration:
```yaml
push:
  enable_push: true
  include_content: false
  group_unread_count_by_room: false
```
@@ -3438,7 +3425,7 @@ This option has the following sub-options:
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.

These indexes are built the first time Synapse starts; admins can
manually trigger a rebuild via the API following the instructions
[for running background updates](../administration/admin_api/background_updates.md#run),
@@ -3697,7 +3684,7 @@ As a result, the worker configuration is divided into two parts.

1. The first part (in this section of the manual) defines which shardable tasks
   are delegated to privileged workers. This allows unprivileged workers to make
   request a privileged worker to act on their behalf.
   requests to a privileged worker to act on their behalf.
1. [The second part](#individual-worker-configuration)
   controls the behaviour of individual workers in isolation.

@@ -3709,7 +3696,7 @@ For guidance on setting up workers, see the [worker documentation](../../workers
A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.

By default, this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.

Example configuration:
@@ -3719,6 +3706,8 @@ worker_replication_secret: "secret_secret"
---
### `start_pushers`

Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).

Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.

@@ -3729,25 +3718,30 @@ start_pushers: false
---
### `pusher_instances`

It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
in which case the work is balanced across them. Use this setting to list the pushers by
[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
restarted after changing this option.
It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal)
and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
a `pusher_instances` map. Doing so will remove handling of this function from the main
process. Multiple workers can be added to this map, in which case the work is balanced
across them. Ensure the main process and all pusher workers are restarted after changing
this option.

If no or only one pusher worker is configured, this setting is not necessary.
The main process will send out push notifications by default if you do not disable
it by setting [`start_pushers: false`](#start_pushers).

Example configuration:
Example configuration for a single worker:
```yaml
pusher_instances:
  - pusher_worker1
```
And for multiple workers:
```yaml
start_pushers: false
pusher_instances:
  - pusher_worker1
  - pusher_worker2
```

---
### `send_federation`

Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).

Controls sending of outbound federation transactions on the main process.
Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
Defaults to `true`.
@@ -3759,29 +3753,36 @@ send_federation: false
---
### `federation_sender_instances`

It is possible to run multiple
[federation sender worker](../../workers.md#synapseappfederation_sender), in which
case the work is balanced across them. Use this setting to list the senders.
It is possible to scale the processes that handle sending outbound federation requests
by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
a `federation_sender_instances` map. Doing so will remove handling of this function from
the main process. Multiple workers can be added to this map, in which case the work is
balanced across them.

This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
started, to ensure that all instances are running with the same config (otherwise
This configuration setting must be shared between all workers handling federation
sending, and if changed all federation sender workers must be stopped at the same time
and then started, to ensure that all instances are running with the same config (otherwise
events may be dropped).

Example configuration for a single worker:
```yaml
send_federation: false
federation_sender_instances:
  - federation_sender1
```
And for multiple workers:
```yaml
federation_sender_instances:
  - federation_sender1
  - federation_sender2
```
---
### `instance_map`

When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)

Example configuration:
@@ -3893,10 +3894,30 @@ Example configuration:
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.

**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).

Defaults to `false`.

*Added in Synapse 1.72.0.*

Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.

Workers declared in [`stream_writers`](#stream_writers) will need to include a
@@ -3915,7 +3936,7 @@ worker_listeners:
### `worker_daemonize`

Specifies whether the worker should be started as a daemon process.
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.

Defaults to `false`.
@@ -3927,11 +3948,11 @@ worker_daemonize: true
---
### `worker_pid_file`

When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".

This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.

See also the [`pid_file`](#pid_file) option for the main Synapse process.
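
Example configuration (the path is illustrative; pick a directory writable by the
worker's user):

```yaml
worker_pid_file: /var/run/synapse/worker1.pid
```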

@@ -3981,4 +4002,3 @@ background_updates:
    min_batch_size: 10
    default_batch_size: 50
```
@@ -135,8 +135,8 @@ In the config file for each worker, you must specify:
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
  with an `http` listener.
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`).
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.

For example:

@@ -191,6 +191,7 @@ information.
    ^/_matrix/federation/(v1|v2)/send_leave/
    ^/_matrix/federation/(v1|v2)/invite/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/timestamp_to_event/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/user/devices/
    ^/_matrix/key/v2/query
@@ -218,10 +219,10 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
    ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
    ^/_matrix/client/v1/rooms/.*/timestamp_to_event$
    ^/_matrix/client/(api/v1|r0|v3|unstable)/search$

    # Encryption requests
    # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
    ^/_matrix/client/(r0|v3|unstable)/keys/query$
    ^/_matrix/client/(r0|v3|unstable)/keys/changes$
    ^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -376,7 +377,7 @@ responsible for
- persisting them to the DB, and finally
- updating the events stream.

Because load is sharded in this way, you *must* restart all worker instances when
adding or removing event persisters.

An `event_persister` should not be mistaken for an `event_creator`.
@@ -504,6 +505,9 @@ worker application type.

### `synapse.app.pusher`

It is likely this option will be deprecated in the future and is not recommended for new
installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
@@ -542,6 +546,9 @@ Note this worker cannot be load-balanced: only one instance should be active.

### `synapse.app.federation_sender`

It is likely this option will be deprecated in the future and is not recommended for
new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
@@ -638,7 +645,9 @@ equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
* `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
* `synapse.app.pusher`
* `synapse.app.synchrotron`

32 mypy.ini
@@ -11,6 +11,7 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True

files =
  docker/,
@@ -58,21 +59,6 @@ exclude = (?x)
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/util/caches/test_cached_call.py
  |tests/util/caches/test_deferred_cache.py
  |tests/util/caches/test_descriptors.py
  |tests/util/caches/test_response_cache.py
  |tests/util/caches/test_ttlcache.py
  |tests/util/test_async_helpers.py
  |tests/util/test_batching_queue.py
  |tests/util/test_dict_cache.py
  |tests/util/test_expiring_cache.py
  |tests/util/test_file_consumer.py
  |tests/util/test_linearizer.py
  |tests/util/test_logcontext.py
  |tests/util/test_lrucache.py
  |tests/util/test_rwlock.py
  |tests/util/test_wheel_timer.py
  )$

[mypy-synapse.federation.transport.client]
@@ -117,9 +103,15 @@ disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True

[mypy-tests.storage.test_id_generators]
disallow_untyped_defs = True

[mypy-tests.storage.test_profile]
disallow_untyped_defs = True

[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True

[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True

@@ -129,9 +121,17 @@ disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True

[mypy-tests.utils]
[mypy-tests.util.caches.*]
disallow_untyped_defs = True

[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False

[mypy-tests.util.*]
disallow_untyped_defs = True

[mypy-tests.utils]
disallow_untyped_defs = True

;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
179 poetry.lock generated
@@ -1,16 +1,16 @@
[[package]]
name = "attrs"
version = "21.4.0"
version = "22.1.0"
description = "Classes Without Boilerplate"
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
python-versions = ">=3.5"

[package.extras]
dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"]
dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"]
tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"]
tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]

[[package]]
name = "Authlib"
@@ -91,7 +91,7 @@ dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0

[[package]]
name = "canonicaljson"
version = "1.6.3"
version = "1.6.4"
description = "Canonical JSON"
category = "main"
optional = false
@@ -246,17 +246,17 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910

[[package]]
name = "flake8"
version = "4.0.1"
version = "5.0.4"
description = "the modular source code checker: pep8 pyflakes and co"
category = "dev"
optional = false
python-versions = ">=3.6"
python-versions = ">=3.6.1"

[package.dependencies]
importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""}
mccabe = ">=0.6.0,<0.7.0"
pycodestyle = ">=2.8.0,<2.9.0"
pyflakes = ">=2.4.0,<2.5.0"
importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""}
mccabe = ">=0.7.0,<0.8.0"
pycodestyle = ">=2.9.0,<2.10.0"
pyflakes = ">=2.5.0,<2.6.0"

[[package]]
name = "flake8-bugbear"
@@ -275,7 +275,7 @@ dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"]

[[package]]
name = "flake8-comprehensions"
version = "3.8.0"
version = "3.10.1"
description = "A flake8 plugin to help you write better list/set/dict comprehensions."
category = "dev"
optional = false
@@ -306,7 +306,7 @@ smmap = ">=3.0.1,<6"

[[package]]
name = "gitpython"
version = "3.1.27"
version = "3.1.29"
description = "GitPython is a python library used to interact with Git repositories"
category = "dev"
optional = false
@@ -452,7 +452,7 @@ i18n = ["Babel (>=2.7)"]

[[package]]
name = "jsonschema"
version = "4.16.0"
version = "4.17.3"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -555,11 +555,11 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma

[[package]]
name = "mccabe"
version = "0.6.1"
version = "0.7.0"
description = "McCabe checker, plugin for flake8"
category = "dev"
optional = false
python-versions = "*"
python-versions = ">=3.6"

[[package]]
name = "msgpack"
@@ -663,7 +663,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"

[[package]]
name = "phonenumbers"
version = "8.12.56"
version = "8.13.0"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
@@ -775,11 +775,11 @@ pyasn1 = ">=0.4.6,<0.5.0"

[[package]]
name = "pycodestyle"
version = "2.8.0"
version = "2.9.1"
description = "Python style guide checker"
category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
python-versions = ">=3.6"

[[package]]
name = "pycparser"
@@ -806,23 +806,23 @@ email = ["email-validator (>=1.0.3)"]

[[package]]
name = "pyflakes"
version = "2.4.0"
version = "2.5.0"
description = "passive checker of Python programs"
category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"

[[package]]
name = "pygithub"
version = "1.56"
description = "Use the full Github API v3"
category = "dev"
optional = false
python-versions = ">=3.6"

[[package]]
name = "pygithub"
version = "1.57"
description = "Use the full Github API v3"
category = "dev"
optional = false
python-versions = ">=3.7"

[package.dependencies]
deprecated = "*"
pyjwt = ">=2.0"
pyjwt = ">=2.4.0"
pynacl = ">=1.4.0"
requests = ">=2.14.0"

@@ -888,17 +888,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]

[[package]]
name = "pyopenssl"
version = "22.0.0"
version = "22.1.0"
description = "Python wrapper module around the OpenSSL library"
category = "main"
optional = false
python-versions = ">=3.6"

[package.dependencies]
cryptography = ">=35.0"
cryptography = ">=38.0.0,<39"

[package.extras]
docs = ["sphinx", "sphinx-rtd-theme"]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
test = ["flaky", "pretend", "pytest (>=3.0.1)"]

[[package]]
@@ -1076,7 +1076,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]

[[package]]
name = "sentry-sdk"
version = "1.10.1"
version = "1.11.1"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1098,6 +1098,7 @@ fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
@@ -1256,11 +1257,11 @@ python-versions = ">= 3.5"

[[package]]
name = "towncrier"
version = "21.9.0"
version = "22.8.0"
description = "Building newsfiles for your project."
category = "dev"
optional = false
python-versions = "*"
python-versions = ">=3.7"

[package.dependencies]
click = "*"
@@ -1268,7 +1269,7 @@ click-default-group = "*"
incremental = "*"
jinja2 = "*"
setuptools = "*"
tomli = {version = "*", markers = "python_version >= \"3.6\""}
tomli = "*"

[package.extras]
dev = ["packaging"]
@@ -1379,7 +1380,7 @@ python-versions = ">=3.6"

[[package]]
name = "types-bleach"
version = "5.0.3"
version = "5.0.3.1"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@@ -1423,7 +1424,7 @@ python-versions = "*"

[[package]]
name = "types-jsonschema"
version = "4.17.0.0"
version = "4.17.0.1"
description = "Typing stubs for jsonschema"
category = "dev"
optional = false
@@ -1439,7 +1440,7 @@ python-versions = "*"

[[package]]
name = "types-pillow"
version = "9.2.2.1"
version = "9.3.0.1"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -1447,15 +1448,15 @@ python-versions = "*"

[[package]]
name = "types-psycopg2"
version = "2.9.21.1"
version = "2.9.21.2"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
python-versions = "*"

[[package]]
name = "types-pyOpenSSL"
version = "22.0.10"
name = "types-pyopenssl"
version = "22.1.0.2"
description = "Typing stubs for pyOpenSSL"
category = "dev"
optional = false
@@ -1466,7 +1467,7 @@ types-cryptography = "*"

[[package]]
name = "types-pyyaml"
version = "6.0.12.1"
version = "6.0.12.2"
description = "Typing stubs for PyYAML"
category = "dev"
optional = false
@@ -1485,7 +1486,7 @@ types-urllib3 = "<1.27"

[[package]]
name = "types-setuptools"
version = "65.5.0.2"
version = "65.5.0.3"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@@ -1642,8 +1643,8 @@ content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0

[metadata.files]
attrs = [
    {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
    {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
    {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
    {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
]
Authlib = [
    {file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"},
@@ -1704,8 +1705,8 @@ bleach = [
    {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"},
]
canonicaljson = [
    {file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"},
    {file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"},
    {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"},
    {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
]
certifi = [
    {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
@@ -1831,16 +1832,16 @@ elementpath = [
    {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"},
]
flake8 = [
    {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
    {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
    {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"},
    {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
]
flake8-bugbear = [
    {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"},
    {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"},
]
flake8-comprehensions = [
    {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"},
    {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
    {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
    {file = "flake8_comprehensions-3.10.1-py3-none-any.whl", hash = "sha256:d763de3c74bc18a79c039a7ec732e0a1985b0c79309ceb51e56401ad0a2cd44e"},
]
frozendict = [
    {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"},
@@ -1866,8 +1867,8 @@ gitdb = [
    {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"},
]
gitpython = [
    {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"},
    {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"},
|
||||
{file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"},
|
||||
{file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"},
|
||||
]
|
||||
hiredis = [
|
||||
{file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"},
|
||||
@@ -2012,8 +2013,8 @@ jinja2 = [
|
||||
{file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
|
||||
]
|
||||
jsonschema = [
|
||||
{file = "jsonschema-4.16.0-py3-none-any.whl", hash = "sha256:9e74b8f9738d6a946d70705dc692b74b5429cd0960d58e79ffecfc43b2221eb9"},
|
||||
{file = "jsonschema-4.16.0.tar.gz", hash = "sha256:165059f076eff6971bae5b742fc029a7b4ef3f9bcf04c14e4776a7605de14b23"},
|
||||
{file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
|
||||
{file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
|
||||
]
|
||||
keyring = [
|
||||
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
|
||||
@@ -2146,8 +2147,8 @@ matrix-synapse-ldap3 = [
|
||||
{file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
|
||||
]
|
||||
mccabe = [
|
||||
{file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
|
||||
{file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
|
||||
{file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
|
||||
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
||||
]
|
||||
msgpack = [
|
||||
{file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
|
||||
@@ -2257,8 +2258,8 @@ pathspec = [
|
||||
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
|
||||
]
|
||||
phonenumbers = [
|
||||
{file = "phonenumbers-8.12.56-py2.py3-none-any.whl", hash = "sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3"},
|
||||
{file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"},
|
||||
{file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"},
|
||||
{file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"},
|
||||
]
|
||||
pillow = [
|
||||
{file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
|
||||
@@ -2342,6 +2343,8 @@ prometheus-client = [
|
||||
psycopg2 = [
|
||||
{file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"},
|
||||
{file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"},
|
||||
{file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"},
|
||||
{file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"},
|
||||
{file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"},
|
||||
{file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"},
|
||||
{file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"},
|
||||
@@ -2367,8 +2370,8 @@ pyasn1-modules = [
|
||||
{file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
|
||||
]
|
||||
pycodestyle = [
|
||||
{file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
|
||||
{file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"},
|
||||
{file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"},
|
||||
{file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"},
|
||||
]
|
||||
pycparser = [
|
||||
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
|
||||
@@ -2413,12 +2416,12 @@ pydantic = [
|
||||
{file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"},
|
||||
]
|
||||
pyflakes = [
|
||||
{file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
|
||||
{file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
|
||||
{file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"},
|
||||
{file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
|
||||
]
|
||||
pygithub = [
|
||||
{file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"},
|
||||
{file = "PyGithub-1.56.tar.gz", hash = "sha256:80c6d85cf0f9418ffeb840fd105840af694c4f17e102970badbaf678251f2a01"},
|
||||
{file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
|
||||
{file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
|
||||
]
|
||||
pygments = [
|
||||
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
|
||||
@@ -2449,8 +2452,8 @@ pynacl = [
|
||||
{file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
|
||||
]
|
||||
pyopenssl = [
|
||||
{file = "pyOpenSSL-22.0.0-py2.py3-none-any.whl", hash = "sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0"},
|
||||
{file = "pyOpenSSL-22.0.0.tar.gz", hash = "sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf"},
|
||||
{file = "pyOpenSSL-22.1.0-py3-none-any.whl", hash = "sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e"},
|
||||
{file = "pyOpenSSL-22.1.0.tar.gz", hash = "sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968"},
|
||||
]
|
||||
pyparsing = [
|
||||
{file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
|
||||
@@ -2566,8 +2569,8 @@ semantic-version = [
|
||||
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
|
||||
]
|
||||
sentry-sdk = [
|
||||
{file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"},
|
||||
{file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"},
|
||||
{file = "sentry-sdk-1.11.1.tar.gz", hash = "sha256:675f6279b6bb1fea09fd61751061f9a90dca3b5929ef631dd50dc8b3aeb245e9"},
|
||||
{file = "sentry_sdk-1.11.1-py2.py3-none-any.whl", hash = "sha256:8b4ff696c0bdcceb3f70bbb87a57ba84fd3168b1332d493fcd16c137f709578c"},
|
||||
]
|
||||
service-identity = [
|
||||
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
|
||||
@@ -2718,8 +2721,8 @@ tornado = [
|
||||
{file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
|
||||
]
|
||||
towncrier = [
|
||||
{file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
|
||||
{file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
|
||||
{file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"},
|
||||
{file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"},
|
||||
]
|
||||
treq = [
|
||||
{file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
|
||||
@@ -2778,8 +2781,8 @@ typed-ast = [
|
||||
{file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
|
||||
]
|
||||
types-bleach = [
|
||||
{file = "types-bleach-5.0.3.tar.gz", hash = "sha256:f7b3df8278efe176d9670d0f063a66c866c77577f71f54b9c7a320e31b1a7bbd"},
|
||||
{file = "types_bleach-5.0.3-py3-none-any.whl", hash = "sha256:5931525d03571f36b2bb40210c34b662c4d26c8fd6f2b1e1e83fe4d2d2fd63c7"},
|
||||
{file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"},
|
||||
{file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"},
|
||||
]
|
||||
types-commonmark = [
|
||||
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
|
||||
@@ -2798,36 +2801,36 @@ types-ipaddress = [
|
||||
{file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
|
||||
]
|
||||
types-jsonschema = [
|
||||
{file = "types-jsonschema-4.17.0.0.tar.gz", hash = "sha256:5b0875503218497cfc5c5ba92b458b1b8ec34a136e4a0d8c4f5889d59b1f5168"},
|
||||
{file = "types_jsonschema-4.17.0.0-py3-none-any.whl", hash = "sha256:520816acf40d1d7ce0981aa805862b27395491b4854188844945c674ad9173a1"},
|
||||
{file = "types-jsonschema-4.17.0.1.tar.gz", hash = "sha256:62625d492e4930411a431909ac32301aeab6180500e70ee222f81d43204cfb3c"},
|
||||
{file = "types_jsonschema-4.17.0.1-py3-none-any.whl", hash = "sha256:77badbe3881cbf79ac9561be2be2b1f37ab104b13afd2231840e6dd6e94e63c2"},
|
||||
]
|
||||
types-opentracing = [
|
||||
{file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"},
|
||||
{file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
|
||||
]
|
||||
types-pillow = [
|
||||
{file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"},
|
||||
{file = "types_Pillow-9.2.2.1-py3-none-any.whl", hash = "sha256:3a6a871cade8428433a21ef459bb0a65532b87d05f9e836a0664431ce445bdcf"},
|
||||
{file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"},
|
||||
{file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
|
||||
]
|
||||
types-psycopg2 = [
|
||||
{file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
|
||||
{file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"},
|
||||
{file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"},
|
||||
{file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"},
|
||||
]
|
||||
types-pyOpenSSL = [
|
||||
{file = "types-pyOpenSSL-22.0.10.tar.gz", hash = "sha256:f943b834f5b97e5e808764c2f6e37be1a2e226c46792296f61558196acfcc3a1"},
|
||||
{file = "types_pyOpenSSL-22.0.10-py3-none-any.whl", hash = "sha256:63baea211768bea580a769ac5c0d637ae8cd3150314aadc5726ca22e4c4f241a"},
|
||||
types-pyopenssl = [
|
||||
{file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
|
||||
{file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
|
||||
]
|
||||
types-pyyaml = [
|
||||
{file = "types-PyYAML-6.0.12.1.tar.gz", hash = "sha256:70ccaafcf3fb404d57bffc1529fdd86a13e8b4f2cf9fc3ee81a6408ce0ad59d2"},
|
||||
{file = "types_PyYAML-6.0.12.1-py3-none-any.whl", hash = "sha256:aaf5e51444c13bd34104695a89ad9c48412599a4f615d65a60e649109714f608"},
|
||||
{file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"},
|
||||
{file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
|
||||
]
|
||||
types-requests = [
|
||||
{file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"},
|
||||
{file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"},
|
||||
]
|
||||
types-setuptools = [
|
||||
{file = "types-setuptools-65.5.0.2.tar.gz", hash = "sha256:9847de6d7087fb1dd4a88c2a21543d1b86a6179c36744f081974303fe2f30f50"},
|
||||
{file = "types_setuptools-65.5.0.2-py3-none-any.whl", hash = "sha256:2d33e4ef0d35cd2da48a143eb02184f58398381ddb1f772eff20ccc4126fec13"},
|
||||
{file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"},
|
||||
{file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"},
|
||||
]
|
||||
types-urllib3 = [
|
||||
{file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
|
||||
|
||||
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.71.0"
version = "1.73.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -20,15 +20,15 @@ crate-type = ["lib", "cdylib"]
name = "synapse.synapse_rust"

[dependencies]
anyhow = "1.0.66"
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.3", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.7.0"
pythonize = "0.17.0"
regex = "1.7.0"
serde = { version = "1.0.147", features = ["derive"] }
serde_json = "1.0.87"
regex = "1.6.0"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"

[build-dependencies]
blake2 = "0.10.4"
@@ -33,10 +33,12 @@ fn bench_match_exact(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -67,10 +69,12 @@ fn bench_match_word(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -101,10 +105,12 @@ fn bench_match_word_miss(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();

@@ -135,10 +141,12 @@ fn bench_eval_message(b: &mut Bencher) {
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        0,
        Some(0),
        Default::default(),
        Default::default(),
        true,
        vec![],
        false,
    )
    .unwrap();
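In each of the four benchmarks above, the call to `PushRuleEvaluator::py_new` changes its third argument from `0` to `Some(0)` and gains two trailing arguments (`vec![]` and `false`). Going by the widened `py_new` signature later in this diff, the annotated call looks like the sketch below; the names for the first two numeric arguments are assumptions, since those parameters fall outside the hunks shown.

    // Sketch: the bench call with each argument labelled. Labels on the
    // second and third arguments are assumed; the rest come from the
    // py_new signature in rust/src/push/evaluator.rs later in this diff.
    let eval = PushRuleEvaluator::py_new(
        flattened_keys,      // BTreeMap<String, String>
        10,                  // assumed: room member count
        Some(0),             // assumed: sender power level
        Default::default(),  // notification_power_levels
        Default::default(),  // related_events_flattened
        true,                // related_event_match_enabled
        vec![],              // room_version_feature_flags (new in this diff)
        false,               // msc3931_enabled (new in this diff)
    )
    .unwrap();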
77 rust/benches/tree_cache.rs Normal file
@@ -0,0 +1,77 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(test)]

use synapse::tree_cache::TreeCache;
use test::Bencher;

extern crate test;

#[bench]
fn bench_tree_cache_get_non_empty(b: &mut Bencher) {
    let mut cache: TreeCache<&str, &str> = TreeCache::new();

    cache.set(["a", "b", "c", "d"], "f").unwrap();

    b.iter(|| cache.get(&["a", "b", "c", "d"]));
}

#[bench]
fn bench_tree_cache_get_empty(b: &mut Bencher) {
    let cache: TreeCache<&str, &str> = TreeCache::new();

    b.iter(|| cache.get(&["a", "b", "c", "d"]));
}

#[bench]
fn bench_tree_cache_set(b: &mut Bencher) {
    let mut cache: TreeCache<&str, &str> = TreeCache::new();

    b.iter(|| cache.set(["a", "b", "c", "d"], "f").unwrap());
}

#[bench]
fn bench_tree_cache_length(b: &mut Bencher) {
    let mut cache: TreeCache<u32, u32> = TreeCache::new();

    for c1 in 0..=10 {
        for c2 in 0..=10 {
            for c3 in 0..=10 {
                for c4 in 0..=10 {
                    cache.set([c1, c2, c3, c4], 1).unwrap()
                }
            }
        }
    }

    b.iter(|| cache.len());
}

#[bench]
fn tree_cache_iterate(b: &mut Bencher) {
    let mut cache: TreeCache<u32, u32> = TreeCache::new();

    for c1 in 0..=10 {
        for c2 in 0..=10 {
            for c3 in 0..=10 {
                for c4 in 0..=10 {
                    cache.set([c1, c2, c3, c4], 1).unwrap()
                }
            }
        }
    }

    b.iter(|| cache.items().count());
}
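Because the file above opts into `#![feature(test)]`, these benchmarks only build on a nightly toolchain (for example via `cargo +nightly bench`). Outside of benchmarking, the API they exercise boils down to the sketch below, which assumes only the `TreeCache` methods introduced later in this diff:

    // Minimal usage sketch of the TreeCache API exercised by the benches
    // above: `set` accepts any IntoIterator of key components, and `get`
    // returns Result<Option<&V>> (an Err signals a key of the wrong depth).
    use synapse::tree_cache::TreeCache;

    fn main() -> Result<(), anyhow::Error> {
        let mut cache: TreeCache<&str, &str> = TreeCache::new();
        cache.set(["a", "b", "c", "d"], "f")?;
        assert_eq!(cache.get(&["a", "b", "c", "d"])?, Some(&"f"));
        Ok(())
    }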
@@ -1,6 +1,7 @@
use pyo3::prelude::*;

pub mod push;
pub mod tree_cache;

/// Returns the hash of all the rust source files at the time it was compiled.
///
@@ -26,6 +27,7 @@ fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;

    push::register_module(py, m)?;
    tree_cache::binding::register_module(py, m)?;

    Ok(())
}
@@ -274,6 +274,156 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed(
            "global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
        ),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
        priority_class: 1,
@@ -302,6 +452,126 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.encrypted")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.message")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.file")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.image")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.video")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                // MSC3933: Type changed from template rule - see MSC.
                pattern: Some(Cow::Borrowed("m.audio")),
                pattern_type: None,
            })),
            // MSC3933: Add condition on top of template rule - see MSC.
            Condition::Known(KnownCondition::RoomVersionSupports {
                // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
                feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
        priority_class: 1,
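The twelve rules added in the two hunks above are all instances of one template: an exact `type` match against an MSC1767 extensible-events event type, a member-count condition for the one-to-one variants only, and the MSC3931 `room_version_supports` condition that MSC3933 layers on top. A condensed restatement with simplified stand-in types (not the real `synapse::push` types, which are Cow-backed):

    // Sketch only: the shared shape of the twelve MSC3933 rules above.
    enum Cond {
        EventMatch { key: &'static str, pattern: String },
        RoomMemberCount { is: &'static str },
        RoomVersionSupports { feature: &'static str },
    }

    fn extensible_conditions(event_type: &str, one_to_one: bool) -> Vec<Cond> {
        let mut conds = vec![Cond::EventMatch {
            key: "type",
            // e.g. "m.message" or "org.matrix.msc1767.message"
            pattern: event_type.to_string(),
        }];
        if one_to_one {
            // Only the *.room_one_to_one variants carry this condition.
            conds.push(Cond::RoomMemberCount { is: "2" });
        }
        // MSC3933: every variant gates on the room version feature flag.
        conds.push(Cond::RoomVersionSupports {
            feature: "org.matrix.msc3932.extensible_events",
        });
        conds
    }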
@@ -29,6 +29,33 @@ use super::{
lazy_static! {
    /// Used to parse the `is` clause in the room member count condition.
    static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");

    /// Used to determine which MSC3931 room version feature flags are actually known to
    /// the push evaluator.
    static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
        RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
    ];

    /// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
    /// declare Extensible Events support ultimately *disable* push rules which do not declare
    /// *any* MSC3931 room_version_supports condition).
    static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
        "global/override/.m.rule.master".to_string(),
        "global/override/.m.rule.roomnotif".to_string(),
        "global/content/.m.rule.contains_user_name".to_string(),
    ];
}

enum RoomVersionFeatures {
    ExtensibleEvents,
}

impl RoomVersionFeatures {
    fn as_str(&self) -> &'static str {
        match self {
            RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
        }
    }
}

/// Allows running a set of push rules against a particular event.
@@ -57,11 +84,19 @@ pub struct PushRuleEvaluator {

    /// If msc3664, push rules for related events, is enabled.
    related_event_match_enabled: bool,

    /// If MSC3931 is applicable, the feature flags for the room version.
    room_version_feature_flags: Vec<String>,

    /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
    /// flag as MSC1767 (extensible events core).
    msc3931_enabled: bool,
}

#[pymethods]
impl PushRuleEvaluator {
    /// Create a new `PushRuleEvaluator`. See struct docstring for details.
    #[allow(clippy::too_many_arguments)]
    #[new]
    pub fn py_new(
        flattened_keys: BTreeMap<String, String>,
@@ -70,6 +105,8 @@ impl PushRuleEvaluator {
        notification_power_levels: BTreeMap<String, i64>,
        related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
        related_event_match_enabled: bool,
        room_version_feature_flags: Vec<String>,
        msc3931_enabled: bool,
    ) -> Result<Self, Error> {
        let body = flattened_keys
            .get("content.body")
@@ -84,6 +121,8 @@ impl PushRuleEvaluator {
            sender_power_level,
            related_events_flattened,
            related_event_match_enabled,
            room_version_feature_flags,
            msc3931_enabled,
        })
    }

@@ -106,7 +145,19 @@ impl PushRuleEvaluator {
                continue;
            }

            let rule_id = &push_rule.rule_id().to_string();
            let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
            let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
            let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
            let mut has_rver_condition = false;

            for condition in push_rule.conditions.iter() {
                has_rver_condition |= matches!(
                    condition,
                    // per MSC3932, we just need *any* room version condition to match
                    Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
                );

                match self.match_condition(condition, user_id, display_name) {
                    Ok(true) => {}
                    Ok(false) => continue 'outer,
@@ -117,6 +168,13 @@ impl PushRuleEvaluator {
                }
            }

            // MSC3932: Disable push rules in extensible event-supporting room versions if they
            // don't describe *any* MSC3931 room version condition, unless the rule is on the
            // safe list.
            if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
                continue;
            }

            let actions = push_rule
                .actions
                .iter()
@@ -204,6 +262,15 @@ impl PushRuleEvaluator {
                    false
                }
            }
            KnownCondition::RoomVersionSupports { feature } => {
                if !self.msc3931_enabled {
                    false
                } else {
                    let flag = feature.to_string();
                    KNOWN_RVER_FLAGS.contains(&flag)
                        && self.room_version_feature_flags.contains(&flag)
                }
            }
        };

        Ok(result)
@@ -362,9 +429,63 @@ fn push_rule_evaluator() {
        BTreeMap::new(),
        BTreeMap::new(),
        true,
        vec![],
        true,
    )
    .unwrap();

    let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
    assert_eq!(result.len(), 3);
}

#[test]
fn test_requires_room_version_supports_condition() {
    use std::borrow::Cow;

    use crate::push::{PushRule, PushRules};

    let mut flattened_keys = BTreeMap::new();
    flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
    let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
    let evaluator = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        BTreeMap::new(),
        BTreeMap::new(),
        false,
        flags,
        true,
    )
    .unwrap();

    // first test: are the master and contains_user_name rules excluded from the "requires room
    // version condition" check?
    let mut result = evaluator.run(
        &FilteredPushRules::default(),
        Some("@bob:example.org"),
        None,
    );
    assert_eq!(result.len(), 3);

    // second test: if an appropriate push rule is in play, does it get handled?
    let custom_rule = PushRule {
        rule_id: Cow::from("global/underride/.org.example.extensible"),
        priority_class: 1, // underride
        conditions: Cow::from(vec![Condition::Known(
            KnownCondition::RoomVersionSupports {
                feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
            },
        )]),
        actions: Cow::from(vec![Action::Notify]),
        default: false,
        default_enabled: true,
    };
    let rules = PushRules::new(vec![custom_rule]);
    result = evaluator.run(
        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
        None,
        None,
    );
    assert_eq!(result.len(), 1);
}
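The MSC3932 gating in `run` above reduces to a single predicate. As a standalone restatement (names are local to this sketch, not part of the diff):

    // A rule survives the MSC3932 check iff it has a room_version_supports
    // condition, is on the safe list, or the room version does not advertise
    // extensible-events support. This mirrors the early `continue` guarded by
    // `!has_rver_condition && !safe_from_rver_condition && supports_extensible_events`.
    fn rule_survives_msc3932(
        has_rver_condition: bool,
        safe_from_rver_condition: bool,
        supports_extensible_events: bool,
    ) -> bool {
        has_rver_condition || safe_from_rver_condition || !supports_extensible_events
    }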
@@ -277,6 +277,10 @@ pub enum KnownCondition {
    SenderNotificationPermission {
        key: Cow<'static, str>,
    },
    #[serde(rename = "org.matrix.msc3931.room_version_supports")]
    RoomVersionSupports {
        feature: Cow<'static, str>,
    },
}

impl IntoPy<PyObject> for Condition {
@@ -408,6 +412,7 @@ pub struct FilteredPushRules {
    push_rules: PushRules,
    enabled_map: BTreeMap<String, bool>,
    msc3664_enabled: bool,
    msc1767_enabled: bool,
}

#[pymethods]
@@ -417,11 +422,13 @@ impl FilteredPushRules {
        push_rules: PushRules,
        enabled_map: BTreeMap<String, bool>,
        msc3664_enabled: bool,
        msc1767_enabled: bool,
    ) -> Self {
        Self {
            push_rules,
            enabled_map,
            msc3664_enabled,
            msc1767_enabled,
        }
    }

@@ -446,6 +453,10 @@ impl FilteredPushRules {
                    return false;
                }

                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
                    return false;
                }

                true
            })
            .map(|r| {
@@ -491,6 +502,18 @@ fn test_deserialize_unstable_msc3664_condition() {
    ));
}

#[test]
fn test_deserialize_unstable_msc3931_condition() {
    let json =
        r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(
        condition,
        Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
    ));
}

#[test]
fn test_deserialize_custom_condition() {
    let json = r#"{"kind":"custom_tag"}"#;
247 rust/src/tree_cache/binding.rs Normal file
@@ -0,0 +1,247 @@
use std::hash::Hash;

use anyhow::Error;
use pyo3::{
    pyclass, pymethods,
    types::{PyModule, PyTuple},
    IntoPy, PyAny, PyObject, PyResult, Python, ToPyObject,
};

use super::TreeCache;

pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    let child_module = PyModule::new(py, "tree_cache")?;
    child_module.add_class::<PythonTreeCache>()?;
    child_module.add_class::<StringTreeCache>()?;

    m.add_submodule(child_module)?;

    // We need to manually add the module to sys.modules to make `from
    // synapse.synapse_rust import push` work.
    py.import("sys")?
        .getattr("modules")?
        .set_item("synapse.synapse_rust.tree_cache", child_module)?;

    Ok(())
}

#[derive(Clone)]
struct HashablePyObject {
    obj: PyObject,
    hash: isize,
}

impl HashablePyObject {
    pub fn new(obj: &PyAny) -> Result<Self, Error> {
        let hash = obj.hash()?;

        Ok(HashablePyObject {
            obj: obj.to_object(obj.py()),
            hash,
        })
    }
}

impl IntoPy<PyObject> for HashablePyObject {
    fn into_py(self, _: Python<'_>) -> PyObject {
        self.obj.clone()
    }
}

impl IntoPy<PyObject> for &HashablePyObject {
    fn into_py(self, _: Python<'_>) -> PyObject {
        self.obj.clone()
    }
}

impl ToPyObject for HashablePyObject {
    fn to_object(&self, _py: Python<'_>) -> PyObject {
        self.obj.clone()
    }
}

impl Hash for HashablePyObject {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.hash.hash(state);
    }
}

impl PartialEq for HashablePyObject {
    fn eq(&self, other: &Self) -> bool {
        let equal = Python::with_gil(|py| {
            let result = self.obj.as_ref(py).eq(other.obj.as_ref(py));
            result.unwrap_or(false)
        });

        equal
    }
}

impl Eq for HashablePyObject {}

#[pyclass]
struct PythonTreeCache(TreeCache<HashablePyObject, PyObject>);

#[pymethods]
impl PythonTreeCache {
    #[new]
    fn new() -> Self {
        PythonTreeCache(Default::default())
    }

    pub fn set(&mut self, key: &PyAny, value: PyObject) -> Result<(), Error> {
        let v: Vec<HashablePyObject> = key
            .iter()?
            .map(|obj| HashablePyObject::new(obj?))
            .collect::<Result<_, _>>()?;

        self.0.set(v, value)?;

        Ok(())
    }

    pub fn get_node<'a>(
        &'a self,
        py: Python<'a>,
        key: &'a PyAny,
    ) -> Result<Option<Vec<(&'a PyTuple, &'a PyObject)>>, Error> {
        let v: Vec<HashablePyObject> = key
            .iter()?
            .map(|obj| HashablePyObject::new(obj?))
            .collect::<Result<_, _>>()?;

        let Some(node) = self.0.get_node(v.clone())? else {
            return Ok(None)
        };

        let items = node
            .items()
            .map(|(k, value)| {
                let vec = v.iter().chain(k.iter().map(|a| *a)).collect::<Vec<_>>();
                let nk = PyTuple::new(py, vec);
                (nk, value)
            })
            .collect::<Vec<_>>();

        Ok(Some(items))
    }

    pub fn get(&self, key: &PyAny) -> Result<Option<&PyObject>, Error> {
        let v: Vec<HashablePyObject> = key
            .iter()?
            .map(|obj| HashablePyObject::new(obj?))
            .collect::<Result<_, _>>()?;

        Ok(self.0.get(&v)?)
    }

    pub fn pop_node<'a>(
        &'a mut self,
        py: Python<'a>,
        key: &'a PyAny,
    ) -> Result<Option<Vec<(&'a PyTuple, PyObject)>>, Error> {
        let v: Vec<HashablePyObject> = key
            .iter()?
            .map(|obj| HashablePyObject::new(obj?))
            .collect::<Result<_, _>>()?;

        let Some(node) = self.0.pop_node(v.clone())? else {
            return Ok(None)
        };

        let items = node
            .into_items()
            .map(|(k, value)| {
                let vec = v.iter().chain(k.iter()).collect::<Vec<_>>();
                let nk = PyTuple::new(py, vec);
                (nk, value)
            })
            .collect::<Vec<_>>();

        Ok(Some(items))
    }

    pub fn pop(&mut self, key: &PyAny) -> Result<Option<PyObject>, Error> {
        let v: Vec<HashablePyObject> = key
            .iter()?
            .map(|obj| HashablePyObject::new(obj?))
            .collect::<Result<_, _>>()?;

        Ok(self.0.pop(&v)?)
    }

    pub fn clear(&mut self) {
        self.0.clear()
    }

    pub fn len(&self) -> usize {
        self.0.len()
    }

    pub fn values(&self) -> Vec<&PyObject> {
        self.0.values().collect()
    }

    pub fn items(&self) -> Vec<(Vec<&HashablePyObject>, &PyObject)> {
        todo!()
    }
}

#[pyclass]
struct StringTreeCache(TreeCache<String, String>);

#[pymethods]
impl StringTreeCache {
    #[new]
    fn new() -> Self {
        StringTreeCache(Default::default())
    }

    pub fn set(&mut self, key: &PyAny, value: String) -> Result<(), Error> {
        let key = key
            .iter()?
            .map(|o| o.expect("iter failed").extract().expect("not a string"));

        self.0.set(key, value)?;

        Ok(())
    }

    // pub fn get_node(&self, key: &PyAny) -> Result<Option<&TreeCacheNode<K, PyObject>>, Error> {
    //     todo!()
    // }

    pub fn get(&self, key: &PyAny) -> Result<Option<&String>, Error> {
        let key = key.iter()?.map(|o| {
            o.expect("iter failed")
                .extract::<String>()
                .expect("not a string")
        });

        Ok(self.0.get(key)?)
    }

    // pub fn pop_node(&mut self, key: &PyAny) -> Result<Option<TreeCacheNode<K, PyObject>>, Error> {
    //     todo!()
    // }

    pub fn pop(&mut self, key: Vec<String>) -> Result<Option<String>, Error> {
        Ok(self.0.pop(&key)?)
    }

    pub fn clear(&mut self) {
        self.0.clear()
    }

    pub fn len(&self) -> usize {
        self.0.len()
    }

    pub fn values(&self) -> Vec<&String> {
        self.0.values().collect()
    }

    pub fn items(&self) -> Vec<(Vec<&HashablePyObject>, &PyObject)> {
        todo!()
    }
}
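A note on the design above: `HashablePyObject` captures the Python object's hash once at construction (via `obj.hash()`, i.e. Python's `hash()`), so keys can live in Rust's `HashMap` without calling back into the interpreter on every probe; only equality checks re-enter Python under the GIL. The same pattern in plain Rust, as a minimal standalone sketch (names local to this sketch):

    use std::hash::{Hash, Hasher};

    // Compute an expensive hash once at construction and reuse it, so the
    // value can serve as a HashMap key without re-hashing the inner object.
    struct CachedHash<T> {
        value: T,
        hash: u64,
    }

    impl<T: Hash> CachedHash<T> {
        fn new(value: T) -> Self {
            let mut hasher = std::collections::hash_map::DefaultHasher::new();
            value.hash(&mut hasher);
            let hash = hasher.finish();
            CachedHash { value, hash }
        }
    }

    impl<T> Hash for CachedHash<T> {
        fn hash<H: Hasher>(&self, state: &mut H) {
            // Feed only the precomputed hash, never the inner value.
            self.hash.hash(state);
        }
    }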
421 rust/src/tree_cache/mod.rs Normal file
@@ -0,0 +1,421 @@
use std::{borrow::Borrow, collections::HashMap, hash::Hash};

use anyhow::{bail, Error};

pub mod binding;

pub enum TreeCacheNode<K, V> {
    Leaf(V),
    Branch(usize, HashMap<K, TreeCacheNode<K, V>>),
}

impl<K, V> TreeCacheNode<K, V> {
    pub fn new_branch() -> Self {
        TreeCacheNode::Branch(0, Default::default())
    }

    fn len(&self) -> usize {
        match self {
            TreeCacheNode::Leaf(_) => 1,
            TreeCacheNode::Branch(size, _) => *size,
        }
    }
}

impl<'a, K: Eq + Hash + 'a, V> TreeCacheNode<K, V> {
    pub fn set(
        &mut self,
        mut key: impl Iterator<Item = K>,
        value: V,
    ) -> Result<(usize, usize), Error> {
        if let Some(k) = key.next() {
            match self {
                TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
                TreeCacheNode::Branch(size, map) => {
                    let node = map.entry(k).or_insert_with(TreeCacheNode::new_branch);
                    let (added, removed) = node.set(key, value)?;

                    *size += added;
                    *size -= removed;

                    Ok((added, removed))
                }
            }
        } else {
            let added = if let TreeCacheNode::Branch(_, map) = self {
                (1, map.len())
            } else {
                (0, 0)
            };

            *self = TreeCacheNode::Leaf(value);

            Ok(added)
        }
    }

    pub fn pop<Q>(
        &mut self,
        current_key: Q,
        mut next_keys: impl Iterator<Item = Q>,
    ) -> Result<Option<TreeCacheNode<K, V>>, Error>
    where
        Q: Borrow<K>,
        Q: Hash + Eq + 'a,
    {
        if let Some(next_key) = next_keys.next() {
            match self {
                TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
                TreeCacheNode::Branch(size, map) => {
                    let node = if let Some(node) = map.get_mut(current_key.borrow()) {
                        node
                    } else {
                        return Ok(None);
                    };

                    if let Some(popped) = node.pop(next_key, next_keys)? {
                        *size -= node.len();

                        Ok(Some(popped))
                    } else {
                        Ok(None)
                    }
                }
            }
        } else {
            match self {
                TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
                TreeCacheNode::Branch(size, map) => {
                    if let Some(node) = map.remove(current_key.borrow()) {
                        *size -= node.len();

                        Ok(Some(node))
                    } else {
                        Ok(None)
                    }
                }
            }
        }
    }

    pub fn items(&'a self) -> impl Iterator<Item = (Vec<&K>, &V)> {
        // To avoid a lot of mallocs we guess the length of the key. Ideally
        // we'd know this.
        let capacity_guesstimate = 10;

        let mut stack = vec![(Vec::with_capacity(capacity_guesstimate), self)];

        std::iter::from_fn(move || {
            while let Some((prefix, node)) = stack.pop() {
                match node {
                    TreeCacheNode::Leaf(value) => return Some((prefix, value)),
                    TreeCacheNode::Branch(_, map) => {
                        stack.extend(map.iter().map(|(k, v)| {
                            let mut new_prefix = Vec::with_capacity(capacity_guesstimate);
                            new_prefix.extend_from_slice(&prefix);
                            new_prefix.push(k);
                            (new_prefix, v)
                        }));
                    }
                }
            }

            None
        })
    }

    pub fn values(&'a self) -> impl Iterator<Item = &V> {
        let mut stack = vec![self];

        std::iter::from_fn(move || {
            while let Some(node) = stack.pop() {
                match node {
                    TreeCacheNode::Leaf(value) => return Some(value),
                    TreeCacheNode::Branch(_, map) => {
                        stack.extend(map.iter().map(|(_k, v)| v));
                    }
                }
            }

            None
        })
    }
}

impl<'a, K: Clone + Eq + Hash + 'a, V> TreeCacheNode<K, V> {
    pub fn into_items(self) -> impl Iterator<Item = (Vec<K>, V)> {
        let mut stack = vec![(Vec::new(), self)];

        std::iter::from_fn(move || {
            while let Some((prefix, node)) = stack.pop() {
                match node {
                    TreeCacheNode::Leaf(value) => return Some((prefix, value)),
                    TreeCacheNode::Branch(_, map) => {
                        stack.extend(map.into_iter().map(|(k, v)| {
                            let mut prefix = prefix.clone();
                            prefix.push(k);
                            (prefix, v)
                        }));
                    }
                }
            }

            None
        })
    }
}

impl<K, V> Default for TreeCacheNode<K, V> {
    fn default() -> Self {
        TreeCacheNode::new_branch()
    }
}

pub struct TreeCache<K, V> {
    root: TreeCacheNode<K, V>,
}

impl<K, V> TreeCache<K, V> {
    pub fn new() -> Self {
        TreeCache {
            root: TreeCacheNode::new_branch(),
        }
    }
}

impl<'a, K: Eq + Hash + 'a, V> TreeCache<K, V> {
    pub fn set(&mut self, key: impl IntoIterator<Item = K>, value: V) -> Result<(), Error> {
        self.root.set(key.into_iter(), value)?;

        Ok(())
    }

    pub fn get_node<Q>(
        &self,
        key: impl IntoIterator<Item = Q>,
    ) -> Result<Option<&TreeCacheNode<K, V>>, Error>
    where
        Q: Borrow<K>,
        Q: Hash + Eq + 'a,
    {
        let mut node = &self.root;

        for k in key {
            match node {
                TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
                TreeCacheNode::Branch(_, map) => {
                    node = if let Some(node) = map.get(k.borrow()) {
                        node
                    } else {
                        return Ok(None);
                    };
                }
            }
        }

        Ok(Some(node))
    }

    pub fn get<Q>(&self, key: impl IntoIterator<Item = Q>) -> Result<Option<&V>, Error>
    where
        Q: Borrow<K>,
        Q: Hash + Eq + 'a,
    {
        if let Some(node) = self.get_node(key)? {
            match node {
                TreeCacheNode::Leaf(value) => Ok(Some(value)),
                TreeCacheNode::Branch(_, _) => bail!("Given key is too short"),
            }
        } else {
            Ok(None)
        }
    }

    pub fn pop_node<Q>(
        &mut self,
        key: impl IntoIterator<Item = Q>,
    ) -> Result<Option<TreeCacheNode<K, V>>, Error>
    where
        Q: Borrow<K>,
        Q: Hash + Eq + 'a,
    {
        let mut key_iter = key.into_iter();

        let k = if let Some(k) = key_iter.next() {
            k
        } else {
            let node = std::mem::replace(&mut self.root, TreeCacheNode::new_branch());
            return Ok(Some(node));
        };

        self.root.pop(k, key_iter)
    }

    pub fn pop(&mut self, key: &[K]) -> Result<Option<V>, Error> {
        if let Some(node) = self.pop_node(key)? {
            match node {
                TreeCacheNode::Leaf(value) => Ok(Some(value)),
                TreeCacheNode::Branch(_, _) => bail!("Given key is too short"),
            }
        } else {
            Ok(None)
        }
    }

    pub fn clear(&mut self) {
        self.root = TreeCacheNode::new_branch();
    }

    pub fn len(&self) -> usize {
        match self.root {
            TreeCacheNode::Leaf(_) => 1,
            TreeCacheNode::Branch(size, _) => size,
        }
    }

    pub fn values(&self) -> impl Iterator<Item = &V> {
        let mut stack = vec![&self.root];

        std::iter::from_fn(move || {
            while let Some(node) = stack.pop() {
                match node {
                    TreeCacheNode::Leaf(value) => return Some(value),
                    TreeCacheNode::Branch(_, map) => {
                        stack.extend(map.values());
                    }
                }
            }

            None
        })
    }

    pub fn items(&self) -> impl Iterator<Item = (Vec<&K>, &V)> {
        self.root.items()
    }
}

impl<K, V> Default for TreeCache<K, V> {
    fn default() -> Self {
        TreeCache::new()
    }
}
#[cfg(test)]
|
||||
mod test {
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn get_set() -> Result<(), Error> {
|
||||
let mut cache = TreeCache::new();
|
||||
|
||||
cache.set(vec!["a", "b"], "c")?;
|
||||
|
||||
assert_eq!(cache.get(&["a", "b"])?, Some(&"c"));
|
||||
|
||||
let node = cache.get_node(&["a"])?.unwrap();
|
||||
|
||||
match node {
|
||||
TreeCacheNode::Leaf(_) => bail!("expected branch"),
|
||||
TreeCacheNode::Branch(_, map) => {
|
||||
assert_eq!(map.len(), 1);
|
||||
assert!(map.contains_key("b"));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn length() -> Result<(), Error> {
|
||||
let mut cache = TreeCache::new();
|
||||
|
||||
cache.set(vec!["a", "b"], "c")?;
|
||||
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
cache.set(vec!["a", "b"], "d")?;
|
||||
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
cache.set(vec!["e", "f"], "g")?;
|
||||
|
||||
assert_eq!(cache.len(), 2);
|
||||
|
||||
cache.set(vec!["e", "h"], "i")?;
|
||||
|
||||
assert_eq!(cache.len(), 3);
|
||||
|
||||
cache.set(vec!["e"], "i")?;
|
||||
|
||||
assert_eq!(cache.len(), 2);
|
||||
|
||||
cache.pop_node(&["a"])?;
|
||||
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clear() -> Result<(), Error> {
|
||||
let mut cache = TreeCache::new();
|
||||
|
||||
cache.set(vec!["a", "b"], "c")?;
|
||||
|
||||
assert_eq!(cache.len(), 1);
|
||||
|
||||
cache.clear();
|
||||
|
||||
assert_eq!(cache.len(), 0);
|
||||
|
||||
assert_eq!(cache.get(&["a", "b"])?, None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pop() -> Result<(), Error> {
|
||||
let mut cache = TreeCache::new();
|
||||
|
||||
cache.set(vec!["a", "b"], "c")?;
|
||||
assert_eq!(cache.pop(&["a", "b"])?, Some("c"));
|
||||
assert_eq!(cache.pop(&["a", "b"])?, None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn values() -> Result<(), Error> {
|
||||
let mut cache = TreeCache::new();
|
||||
|
||||
cache.set(vec!["a", "b"], "c")?;
|
||||
|
||||
let expected = ["c"].iter().collect();
|
||||
assert_eq!(cache.values().collect::<BTreeSet<_>>(), expected);
|
||||
|
||||
cache.set(vec!["d", "e"], "f")?;
|
||||
|
||||
let expected = ["c", "f"].iter().collect();
|
||||
assert_eq!(cache.values().collect::<BTreeSet<_>>(), expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn items() -> Result<(), Error> {
|
||||
let mut cache = TreeCache::new();
|
||||
|
||||
cache.set(vec!["a", "b"], "c")?;
|
||||
cache.set(vec!["d", "e"], "f")?;
|
||||
|
||||
let expected = [(vec![&"a", &"b"], &"c"), (vec![&"d", &"e"], &"f")]
|
||||
.into_iter()
|
||||
.collect();
|
||||
assert_eq!(cache.items().collect::<BTreeSet<_>>(), expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
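
Note: the following sketch is an editor's illustration, not part of the diff. It restates, in Python, the size bookkeeping that TreeCacheNode::Branch(size, map) performs above: every branch caches the number of leaves beneath it, so len() is O(1), and replacing a whole subtree with a single leaf adjusts the counts along the path. All names here are invented for the example.

    class TrieSketch:
        def __init__(self):
            self.children = {}  # key part -> TrieSketch (branch) or ("leaf", value)
            self.size = 0  # number of leaves at or below this branch

        def set(self, key, value):
            *prefix, last = key
            path, node = [self], self
            for part in prefix:
                child = node.children.setdefault(part, TrieSketch())
                if not isinstance(child, TrieSketch):
                    raise ValueError("Given key is too long")
                node = child
                path.append(node)
            old = node.children.get(last)
            if isinstance(old, TrieSketch):
                delta = 1 - old.size  # a single leaf replaces a whole subtree
            elif old is not None:
                delta = 0  # overwriting an existing leaf
            else:
                delta = 1  # a brand new leaf
            node.children[last] = ("leaf", value)
            for branch in path:
                branch.size += delta

    t = TrieSketch()
    t.set(("a", "b"), "c")
    t.set(("e", "f"), "g")
    t.set(("e", "h"), "i")
    assert t.size == 3
    t.set(("e",), "i")  # replaces the two-leaf subtree under "e"
    assert t.size == 2
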
@@ -27,6 +27,7 @@ DISTS = (
    "debian:sid",
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
)

DESC = """\

@@ -162,9 +162,9 @@ else
  # We only test faster room joins on monoliths, because they are purposefully
  # being developed without worker support to start with.
  #
  # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
  # also only pass with monoliths, currently.
  test_tags="$test_tags,faster_joins,msc2716,msc3030"
  # The tests for importing historical messages (MSC2716) also only pass with monoliths,
  # currently.
  test_tags="$test_tags,faster_joins,msc2716"
fi

@@ -46,11 +46,12 @@ import signedjson.key
import signedjson.types
import srvlookup
import yaml
from requests import PreparedRequest, Response
from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool

# uncomment the following to enable debug logging of http requests
# from httplib import HTTPConnection
# from http.client import HTTPConnection
# HTTPConnection.debuglevel = 1

@@ -103,6 +104,7 @@ def request(
    destination: str,
    path: str,
    content: Optional[str],
    verify_tls: bool,
) -> requests.Response:
    if method is None:
        if content is None:
@@ -141,7 +143,6 @@ def request(
    s.mount("matrix://", MatrixConnectionAdapter())

    headers: Dict[str, str] = {
        "Host": destination,
        "Authorization": authorization_headers[0],
    }

@@ -152,7 +153,7 @@ def request(
        method=method,
        url=dest,
        headers=headers,
        verify=False,
        verify=verify_tls,
        data=content,
        stream=True,
    )
@@ -202,6 +203,12 @@ def main() -> None:

    parser.add_argument("--body", help="Data to send as the body of the HTTP request")

    parser.add_argument(
        "--insecure",
        action="store_true",
        help="Disable TLS certificate verification",
    )

    parser.add_argument(
        "path", help="request path, including the '/_matrix/federation/...' prefix."
    )
@@ -227,6 +234,7 @@ def main() -> None:
        args.destination,
        args.path,
        content=args.body,
        verify_tls=not args.insecure,
    )

    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
@@ -254,36 +262,93 @@ def read_args_from_config(args: argparse.Namespace) -> None:


class MatrixConnectionAdapter(HTTPAdapter):
    @staticmethod
    def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
        if s[-1] == "]":
            # ipv6 literal (with no port)
            return s, 8448
    def send(
        self,
        request: PreparedRequest,
        *args: Any,
        **kwargs: Any,
    ) -> Response:
        # overrides the send() method in the base class.

        if ":" in s:
            out = s.rsplit(":", 1)
        # We need to look for .well-known redirects before passing the request up to
        # HTTPAdapter.send().
        assert isinstance(request.url, str)
        parsed = urlparse.urlsplit(request.url)
        server_name = parsed.netloc
        well_known = self._get_well_known(parsed.netloc)

        if well_known:
            server_name = well_known

        # replace the scheme in the uri with https, so that cert verification is done
        # also replace the hostname if we got a .well-known result
        request.url = urlparse.urlunsplit(
            ("https", server_name, parsed.path, parsed.query, parsed.fragment)
        )

        # at this point we also add the host header (otherwise urllib will add one
        # based on the `host` from the connection returned by `get_connection`,
        # which will be wrong if there is an SRV record).
        request.headers["Host"] = server_name

        return super().send(request, *args, **kwargs)

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    ) -> HTTPConnectionPool:
        # overrides the get_connection() method in the base class
        parsed = urlparse.urlsplit(url)
        (host, port, ssl_server_name) = self._lookup(parsed.netloc)
        print(
            f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
        )
        return self.poolmanager.connection_from_host(
            host,
            port=port,
            scheme="https",
            pool_kwargs={"server_hostname": ssl_server_name},
        )

    @staticmethod
    def _lookup(server_name: str) -> Tuple[str, int, str]:
        """
        Do an SRV lookup on a server name and return the host:port to connect to
        Given the server_name (after any .well-known lookup), return the host, port and
        the ssl server name
        """
        if server_name[-1] == "]":
            # ipv6 literal (with no port)
            return server_name, 8448, server_name

        if ":" in server_name:
            # explicit port
            out = server_name.rsplit(":", 1)
            try:
                port = int(out[1])
            except ValueError:
                raise ValueError("Invalid host:port '%s'" % s)
            return out[0], port

        # try a .well-known lookup
        if not skip_well_known:
            well_known = MatrixConnectionAdapter.get_well_known(s)
            if well_known:
                return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
                raise ValueError("Invalid host:port '%s'" % (server_name,))
            return out[0], port, out[0]

        try:
            srv = srvlookup.lookup("matrix", "tcp", s)[0]
            return srv.host, srv.port
            srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
            print(
                f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
                file=sys.stderr,
            )
            return srv.host, srv.port, server_name
        except Exception:
            return s, 8448
            return server_name, 8448, server_name

    @staticmethod
    def get_well_known(server_name: str) -> Optional[str]:
        uri = "https://%s/.well-known/matrix/server" % (server_name,)
        print("fetching %s" % (uri,), file=sys.stderr)
    def _get_well_known(server_name: str) -> Optional[str]:
        if ":" in server_name:
            # explicit port, or ipv6 literal. Either way, no .well-known
            return None

        # TODO: check for ipv4 literals

        uri = f"https://{server_name}/.well-known/matrix/server"
        print(f"fetching {uri}", file=sys.stderr)

        try:
            resp = requests.get(uri)
@@ -304,19 +369,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
            print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
            return None

    def get_connection(
        self, url: str, proxies: Optional[Dict[str, str]] = None
    ) -> HTTPConnectionPool:
        parsed = urlparse.urlparse(url)

        (host, port) = self.lookup(parsed.netloc)
        netloc = "%s:%d" % (host, port)
        print("Connecting to %s" % (netloc,), file=sys.stderr)
        url = urlparse.urlunparse(
            ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
        )
        return super().get_connection(url, proxies)


if __name__ == "__main__":
    main()
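
Note: an editor's sketch, not part of the diff. The rewritten adapter resolves a Matrix server name in this order: a .well-known result replaces the name (done in send()), then an IPv6 literal or an explicit port is used verbatim, then an SRV record at _matrix._tcp.<name> is tried, and finally port 8448 is the fallback; the TLS/SNI name stays the (possibly .well-known-redirected) server name rather than the SRV target. The helper below folds that into one function; resolve_srv is an injected stand-in for the srvlookup call.

    def resolve(server_name, well_known_result, resolve_srv):
        # 1. A .well-known result (fetched over HTTPS) replaces the server name.
        if well_known_result:
            server_name = well_known_result
        # 2. IPv6 literal without a port: connect directly on 8448.
        if server_name.endswith("]"):
            return server_name, 8448, server_name
        # 3. Explicit port: use it, and use the bare host for SNI too.
        if ":" in server_name:
            host, _, port = server_name.rpartition(":")
            return host, int(port), host
        # 4. SRV record: connect to its target, but keep the server name for SNI.
        target = resolve_srv(f"_matrix._tcp.{server_name}")
        if target is not None:
            return target[0], target[1], server_name
        # 5. Fallback: the server name itself on port 8448.
        return server_name, 8448, server_name
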
@@ -219,9 +219,7 @@ def _prepare() -> None:
    update_branch(repo)

    # Create the new release branch
    # Type ignore will no longer be needed after GitPython 3.1.28.
    # See https://github.com/gitpython-developers/GitPython/pull/1419
    repo.create_head(release_branch_name, commit=base_branch)  # type: ignore[arg-type]
    repo.create_head(release_branch_name, commit=base_branch)

    # Special-case SyTest: we don't actually prepare any files so we may
    # as well push it now (and only when we create a release branch;

@@ -1,4 +1,4 @@
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union

from synapse.types import JsonDict

@@ -26,7 +26,11 @@ class PushRules:

class FilteredPushRules:
    def __init__(
        self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
        self,
        push_rules: PushRules,
        enabled_map: Dict[str, bool],
        msc3664_enabled: bool,
        msc1767_enabled: bool,
    ): ...
    def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

@@ -41,10 +45,12 @@ class PushRuleEvaluator:
        notification_power_levels: Mapping[str, int],
        related_events_flattened: Mapping[str, Mapping[str, str]],
        related_event_match_enabled: bool,
        room_version_feature_flags: list[str],
        msc3931_enabled: bool,
    ): ...
    def run(
        self,
        push_rules: FilteredPushRules,
        user_id: Optional[str],
        display_name: Optional[str],
    ) -> Collection[dict]: ...
    ) -> Collection[Union[Mapping, str]]: ...

@@ -713,7 +713,7 @@ class HttpResponseException(CodeMessageException):
            set to the reason code from the HTTP response.

        Returns:
            SynapseError:
            The error converted to a SynapseError.
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Dict, Optional
from typing import Callable, Dict, List, Optional

import attr

@@ -51,6 +51,13 @@ class RoomDisposition:
    UNSTABLE = "unstable"


class PushRuleRoomFlag:
    """Enum for listing possible MSC3931 room version feature flags, for push rules"""

    # MSC3932: Room version supports MSC1767 Extensible Events.
    EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomVersion:
    """An object which describes the unique attributes of a room version."""
@@ -91,6 +98,12 @@ class RoomVersion:
    msc3787_knock_restricted_join_rule: bool
    # MSC3667: Enforce integer power levels
    msc3667_int_only_power_levels: bool
    # MSC3931: Adds a push rule condition for "room version feature flags", making
    # some push rules room version dependent. Note that adding a flag to this list
    # is not enough to mark it "supported": the push rule evaluator also needs to
    # support the flag. Unknown flags are ignored by the evaluator, making conditions
    # fail if used.
    msc3931_push_features: List[str]  # values from PushRuleRoomFlag


class RoomVersions:
@@ -111,6 +124,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V2 = RoomVersion(
        "2",
@@ -129,6 +143,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V3 = RoomVersion(
        "3",
@@ -147,6 +162,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V4 = RoomVersion(
        "4",
@@ -165,6 +181,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V5 = RoomVersion(
        "5",
@@ -183,6 +200,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V6 = RoomVersion(
        "6",
@@ -201,6 +219,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
@@ -219,6 +238,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V7 = RoomVersion(
        "7",
@@ -237,6 +257,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V8 = RoomVersion(
        "8",
@@ -255,6 +276,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V9 = RoomVersion(
        "9",
@@ -273,6 +295,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
@@ -291,6 +314,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    V10 = RoomVersion(
        "10",
@@ -309,6 +333,7 @@ class RoomVersions:
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=[],
    )
    MSC2716v4 = RoomVersion(
        "org.matrix.msc2716v4",
@@ -327,6 +352,27 @@ class RoomVersions:
        msc2716_redactions=True,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=[],
    )
    MSC1767v10 = RoomVersion(
        # MSC1767 (Extensible Events) based on room version "10"
        "org.matrix.msc1767.10",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS],
    )
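
Note: an editor's illustration, not part of the diff. The comment on msc3931_push_features above implies a two-sided check: a push rule condition naming a feature flag passes only when the room version advertises the flag and the evaluator itself supports it; unknown flags are ignored, which makes such conditions fail. Names below are invented for the example.

    EVALUATOR_SUPPORTED_FLAGS = {"org.matrix.msc3932.extensible_events"}

    def room_version_feature_condition(required_flag, room_version_flags):
        # Both the evaluator and the room version must know the flag.
        return (
            required_flag in EVALUATOR_SUPPORTED_FLAGS
            and required_flag in room_version_flags
        )
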
@@ -47,6 +47,7 @@ from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
from twisted.web.resource import Resource

import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ManholeConfig
from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -264,26 +266,18 @@ def register_start(
    reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))


def listen_metrics(
    bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
) -> None:
def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
    """
    Start Prometheus metrics server.
    """
    from prometheus_client import start_http_server as start_http_server_prometheus

    from synapse.metrics import (
        RegistryProxy,
        start_http_server as start_http_server_legacy,
    )
    from synapse.metrics import RegistryProxy

    for host in bind_addresses:
        logger.info("Starting metrics listener on %s:%d", host, port)
        if enable_legacy_metric_names:
            start_http_server_legacy(port, addr=host, registry=RegistryProxy)
        else:
            _set_prometheus_client_use_created_metrics(False)
            start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
        _set_prometheus_client_use_created_metrics(False)
        start_http_server_prometheus(port, addr=host, registry=RegistryProxy)


def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
@@ -357,6 +351,55 @@ def listen_tcp(
    return r  # type: ignore[return-value]


def listen_http(
    listener_config: ListenerConfig,
    root_resource: Resource,
    version_string: str,
    max_request_body_size: int,
    context_factory: Optional[IOpenSSLContextFactory],
    reactor: ISynapseReactor = reactor,
) -> List[Port]:
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls

    assert listener_config.http_options is not None

    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)

    site = SynapseSite(
        "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
        version_string,
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )
    if tls:
        # refresh_certificate should have been called before this.
        assert context_factory is not None
        ports = listen_ssl(
            bind_addresses,
            port,
            site,
            context_factory,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            site,
            reactor=reactor,
        )
        logger.info("Synapse now listening on TCP port %d", port)
    return ports


def listen_ssl(
    bind_addresses: Collection[str],
    port: int,
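
Note: an editor's sketch, not part of the diff. With the legacy path removed, listen_metrics() reduces to prometheus_client's stock HTTP exporter (after Synapse disables the synthetic *_created series via its own helper). The snippet below shows the underlying prometheus_client call with an example port and address; Synapse passes its RegistryProxy instead of the default registry.

    from prometheus_client import REGISTRY, start_http_server

    # Serves the registry's metrics on http://127.0.0.1:9001/metrics in a
    # background thread.
    start_http_server(9001, addr="127.0.0.1", registry=REGISTRY)
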
@@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import (
    ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd")


class AdminCmdSlavedStore(
    SlavedFilteringStore,
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedDeviceStore,
    FilteringWorkerStore,
    DeviceWorkerStore,
    TagsWorkerStore,
    DeviceInboxWorkerStore,
    AccountDataWorkerStore,
    PushRulesWorkerStore,
    ApplicationServiceTransactionWorkerStore,
    ApplicationServiceWorkerStore,
    RegistrationWorkerStore,
    RoomMemberWorkerStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
    EventPushActionsWorkerStore,
    StateGroupWorkerStore,
    SignatureWorkerStore,
    UserErasureWorkerStore,
    ReceiptsWorkerStore,
    StreamWorkerStore,
    EventsWorkerStore,
    RegistrationWorkerStore,
    RoomWorkerStore,
):
    def __init__(

@@ -14,14 +14,12 @@
# limitations under the License.
import logging
import sys
from typing import Dict, List, Optional, Tuple
from typing import Dict, List

from twisted.internet import address
from twisted.web.resource import Resource

import synapse
import synapse.events
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
    CLIENT_API_PREFIX,
    FEDERATION_PREFIX,
@@ -43,51 +41,11 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.rest import ClientRestResource
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
    account_data,
    events,
    initial_sync,
    login,
    presence,
    profile,
    push_rule,
    read_marker,
    receipts,
    relations,
    room,
    room_batch,
    room_keys,
    sendtodevice,
    sync,
    tags,
    user_directory,
    versions,
    voip,
)
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
    KeyChangesServlet,
    KeyQueryServlet,
    OneTimeKeyServlet,
)
from synapse.rest.client.register import (
    RegisterRestServlet,
    RegistrationTokenValidityRestServlet,
)
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -101,8 +59,16 @@ from synapse.storage.databases.main.appservice import (
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import (
    EventPushActionsWorkerStore,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
@@ -111,118 +77,31 @@ from synapse.storage.databases.main.monthly_active_users import (
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import JsonDict
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree

logger = logging.getLogger("synapse.app.generic_worker")


class KeyUploadServlet(RestServlet):
    """An implementation of the `KeyUploadServlet` that responds to read only
    requests, but otherwise proxies through to the master instance.
    """

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs: HomeServer):
        """
        Args:
            hs: server
        """
        super().__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker.worker_main_http_uri

    async def on_POST(
        self, request: SynapseRequest, device_id: Optional[str]
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.

            # Proxy headers from the original request, such as the auth headers
            # (in case the access token is there) and the original IP /
            # User-Agent of the request.
            headers: Dict[bytes, List[bytes]] = {
                header: list(request.requestHeaders.getRawHeaders(header, []))
                for header in (b"Authorization", b"User-Agent")
            }
            # Add the previous hop to the X-Forwarded-For header.
            x_forwarded_for = list(
                request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])
            )
            # we use request.client here, since we want the previous hop, not the
            # original client (as returned by request.getClientAddress()).
            if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
                previous_host = request.client.host.encode("ascii")
                # If the header exists, add to the comma-separated list of the first
                # instance of the header. Otherwise, generate a new header.
                if x_forwarded_for:
                    x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
                    x_forwarded_for.extend(x_forwarded_for[1:])
                else:
                    x_forwarded_for = [previous_host]
                headers[b"X-Forwarded-For"] = x_forwarded_for

            # Replicate the original X-Forwarded-Proto header. Note that
            # XForwardedForRequest overrides isSecure() to give us the original protocol
            # used by the client, as opposed to the protocol used by our upstream proxy
            # - which is what we want here.
            headers[b"X-Forwarded-Proto"] = [
                b"https" if request.isSecure() else b"http"
            ]

            try:
                result = await self.http_client.post_json_get_json(
                    self.main_uri + request.uri.decode("ascii"), body, headers=headers
                )
            except HttpResponseException as e:
                raise e.to_synapse_error() from e
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to talk to master") from e

            return 200, result
        else:
            # Just interested in counts.
            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}

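Note: an editor's aside, not part of the diff. The X-Forwarded-For handling above intends to append the previous hop to the first instance of the header and keep any later instances (the `extend` over the freshly rebuilt list is a no-op as written). A standalone sketch of that intent, with invented names:

    def chain_x_forwarded_for(existing, previous_host):
        if existing:
            return [existing[0] + b", " + previous_host] + existing[1:]
        return [previous_host]

    assert chain_x_forwarded_for([b"203.0.113.5"], b"198.51.100.7") == [
        b"203.0.113.5, 198.51.100.7"
    ]
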
class GenericWorkerSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
@@ -232,26 +111,36 @@ class GenericWorkerSlavedStore(
    EndToEndRoomKeyStore,
    PresenceStore,
    DeviceInboxWorkerStore,
    SlavedDeviceStore,
    SlavedPushRuleStore,
    DeviceWorkerStore,
    TagsWorkerStore,
    AccountDataWorkerStore,
    SlavedPusherStore,
    CensorEventsStore,
    ClientIpWorkerStore,
    SlavedEventStore,
    SlavedKeyStore,
    # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
    # the races it creates aren't too bad.
    KeyStore,
    RoomWorkerStore,
    RoomBatchStore,
    DirectoryWorkerStore,
    PushRulesWorkerStore,
    ApplicationServiceTransactionWorkerStore,
    ApplicationServiceWorkerStore,
    ProfileWorkerStore,
    SlavedFilteringStore,
    FilteringWorkerStore,
    MonthlyActiveUsersWorkerStore,
    MediaRepositoryStore,
    ServerMetricsStore,
    PusherWorkerStore,
    RoomMemberWorkerStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
    EventPushActionsWorkerStore,
    StateGroupWorkerStore,
    SignatureWorkerStore,
    UserErasureWorkerStore,
    ReceiptsWorkerStore,
    StreamWorkerStore,
    EventsWorkerStore,
    RegistrationWorkerStore,
    SearchStore,
    TransactionWorkerStore,
@@ -268,15 +157,9 @@ class GenericWorkerServer(HomeServer):
    DATASTORE_CLASS = GenericWorkerSlavedStore  # type: ignore

    def _listen_http(self, listener_config: ListenerConfig) -> None:
        port = listener_config.port
        bind_addresses = listener_config.bind_addresses

        assert listener_config.http_options is not None

        site_tag = listener_config.http_options.tag
        if site_tag is None:
            site_tag = str(port)

        # We always include a health resource.
        resources: Dict[str, Resource] = {"/health": HealthResource()}

@@ -285,45 +168,7 @@ class GenericWorkerServer(HomeServer):
            if name == "metrics":
                resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
            elif name == "client":
                resource = JsonResource(self, canonical_json=False)

                RegisterRestServlet(self).register(resource)
                RegistrationTokenValidityRestServlet(self).register(resource)
                login.register_servlets(self, resource)
                ThreepidRestServlet(self).register(resource)
                WhoamiRestServlet(self).register(resource)
                DevicesRestServlet(self).register(resource)

                # Read-only
                KeyUploadServlet(self).register(resource)
                KeyQueryServlet(self).register(resource)
                KeyChangesServlet(self).register(resource)
                OneTimeKeyServlet(self).register(resource)

                voip.register_servlets(self, resource)
                push_rule.register_servlets(self, resource)
                versions.register_servlets(self, resource)

                profile.register_servlets(self, resource)

                sync.register_servlets(self, resource)
                events.register_servlets(self, resource)
                room.register_servlets(self, resource, is_worker=True)
                relations.register_servlets(self, resource)
                room.register_deprecated_servlets(self, resource)
                initial_sync.register_servlets(self, resource)
                room_batch.register_servlets(self, resource)
                room_keys.register_servlets(self, resource)
                tags.register_servlets(self, resource)
                account_data.register_servlets(self, resource)
                receipts.register_servlets(self, resource)
                read_marker.register_servlets(self, resource)

                sendtodevice.register_servlets(self, resource)

                user_directory.register_servlets(self, resource)

                presence.register_servlets(self, resource)
                resource: Resource = ClientRestResource(self)

                resources[CLIENT_API_PREFIX] = resource

@@ -375,23 +220,15 @@ class GenericWorkerServer(HomeServer):

        root_resource = create_resource_tree(resources, OptionsResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
                max_request_body_size=max_request_body_size(self.config),
                reactor=self.get_reactor(),
            ),
        _base.listen_http(
            listener_config,
            root_resource,
            self.version_string,
            max_request_body_size(self.config),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )

        logger.info("Synapse worker now listening on port %d", port)

    def start_listening(self) -> None:
        for listener in self.config.worker.worker_listeners:
            if listener.type == "http":
@@ -413,7 +250,6 @@ class GenericWorkerServer(HomeServer):
                _base.listen_metrics(
                    listener.bind_addresses,
                    listener.port,
                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
                )
            else:
                logger.warning("Unsupported listener type: %s", listener.type)

@@ -37,8 +37,7 @@ from synapse.api.urls import (
from synapse.app import _base
from synapse.app._base import (
    handle_startup_exception,
    listen_ssl,
    listen_tcp,
    listen_http,
    max_request_body_size,
    redirect_stdio_to_logs,
    register_start,
@@ -53,7 +52,6 @@ from synapse.http.server import (
    RootOptionsRedirectResource,
    StaticResource,
)
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@@ -83,8 +81,6 @@ class SynapseHomeServer(HomeServer):
        self, config: HomeServerConfig, listener_config: ListenerConfig
    ) -> Iterable[Port]:
        port = listener_config.port
        bind_addresses = listener_config.bind_addresses
        tls = listener_config.tls
        # Must exist since this is an HTTP listener.
        assert listener_config.http_options is not None
        site_tag = listener_config.http_options.tag
@@ -140,37 +136,15 @@ class SynapseHomeServer(HomeServer):
        else:
            root_resource = OptionsResource()

        site = SynapseSite(
            "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
            site_tag,
        ports = listen_http(
            listener_config,
            create_resource_tree(resources, root_resource),
            self.version_string,
            max_request_body_size=max_request_body_size(self.config),
            max_request_body_size(self.config),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )

        if tls:
            # refresh_certificate should have been called before this.
            assert self.tls_server_context_factory is not None
            ports = listen_ssl(
                bind_addresses,
                port,
                site,
                self.tls_server_context_factory,
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d (TLS)", port)

        else:
            ports = listen_tcp(
                bind_addresses,
                port,
                site,
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d", port)

        return ports

    def _configure_named_resource(
@@ -291,7 +265,6 @@ class SynapseHomeServer(HomeServer):
                _base.listen_metrics(
                    listener.bind_addresses,
                    listener.port,
                    enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
                )
            else:
                # this shouldn't happen, as the listener type should have been checked

@@ -32,9 +32,9 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

# Type for the `device_one_time_key_counts` field in an appservice transaction
# Type for the `device_one_time_keys_count` field in an appservice transaction
# user ID -> {device ID -> {algorithm -> count}}
TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]

# Type for the `device_unused_fallback_key_types` field in an appservice transaction
# user ID -> {device ID -> [algorithm]}
@@ -376,7 +376,7 @@ class AppServiceTransaction:
        events: List[EventBase],
        ephemeral: List[JsonDict],
        to_device_messages: List[JsonDict],
        one_time_key_counts: TransactionOneTimeKeyCounts,
        one_time_keys_count: TransactionOneTimeKeysCount,
        unused_fallback_keys: TransactionUnusedFallbackKeys,
        device_list_summary: DeviceListUpdates,
    ):
@@ -385,7 +385,7 @@ class AppServiceTransaction:
        self.events = events
        self.ephemeral = ephemeral
        self.to_device_messages = to_device_messages
        self.one_time_key_counts = one_time_key_counts
        self.one_time_keys_count = one_time_keys_count
        self.unused_fallback_keys = unused_fallback_keys
        self.device_list_summary = device_list_summary

@@ -402,7 +402,7 @@ class AppServiceTransaction:
            events=self.events,
            ephemeral=self.ephemeral,
            to_device_messages=self.to_device_messages,
            one_time_key_counts=self.one_time_key_counts,
            one_time_keys_count=self.one_time_keys_count,
            unused_fallback_keys=self.unused_fallback_keys,
            device_list_summary=self.device_list_summary,
            txn_id=self.id,
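
Note: an editor's illustration of the renamed type's shape, with made-up values; it is not part of the diff. TransactionOneTimeKeysCount maps user ID -> device ID -> algorithm -> remaining count:

    one_time_keys_count = {
        "@alice:example.org": {
            "DEVICEID": {"signed_curve25519": 50},
        },
    }
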
@@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.appservice import (
    ApplicationService,
    TransactionOneTimeKeyCounts,
    TransactionOneTimeKeysCount,
    TransactionUnusedFallbackKeys,
)
from synapse.events import EventBase
@@ -262,7 +262,7 @@ class ApplicationServiceApi(SimpleHttpClient):
        events: List[EventBase],
        ephemeral: List[JsonDict],
        to_device_messages: List[JsonDict],
        one_time_key_counts: TransactionOneTimeKeyCounts,
        one_time_keys_count: TransactionOneTimeKeysCount,
        unused_fallback_keys: TransactionUnusedFallbackKeys,
        device_list_summary: DeviceListUpdates,
        txn_id: Optional[int] = None,
@@ -310,10 +310,13 @@ class ApplicationServiceApi(SimpleHttpClient):

        # TODO: Update to stable prefixes once MSC3202 completes FCP merge
        if service.msc3202_transaction_extensions:
            if one_time_key_counts:
            if one_time_keys_count:
                body[
                    "org.matrix.msc3202.device_one_time_key_counts"
                ] = one_time_key_counts
                ] = one_time_keys_count
                body[
                    "org.matrix.msc3202.device_one_time_keys_count"
                ] = one_time_keys_count
            if unused_fallback_keys:
                body[
                    "org.matrix.msc3202.device_unused_fallback_key_types"
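
Note: an editor's sketch of the net effect of the hunk above, not part of the diff. During the rename the transaction body carries the same payload under both unstable field names, so appservices reading either name keep working:

    body = {}
    if one_time_keys_count:
        body["org.matrix.msc3202.device_one_time_key_counts"] = one_time_keys_count
        body["org.matrix.msc3202.device_one_time_keys_count"] = one_time_keys_count
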
@@ -64,7 +64,7 @@ from typing import (
from synapse.appservice import (
    ApplicationService,
    ApplicationServiceState,
    TransactionOneTimeKeyCounts,
    TransactionOneTimeKeysCount,
    TransactionUnusedFallbackKeys,
)
from synapse.appservice.api import ApplicationServiceApi
@@ -258,7 +258,7 @@ class _ServiceQueuer:
        ):
            return

        one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
        one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
        unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None

        if (
@@ -269,7 +269,7 @@ class _ServiceQueuer:
            # for the users which are mentioned in this transaction,
            # as well as the appservice's sender.
            (
                one_time_key_counts,
                one_time_keys_count,
                unused_fallback_keys,
            ) = await self._compute_msc3202_otk_counts_and_fallback_keys(
                service, events, ephemeral, to_device_messages_to_send
@@ -281,7 +281,7 @@ class _ServiceQueuer:
            events,
            ephemeral,
            to_device_messages_to_send,
            one_time_key_counts,
            one_time_keys_count,
            unused_fallback_keys,
            device_list_summary,
        )
@@ -296,7 +296,7 @@ class _ServiceQueuer:
        events: Iterable[EventBase],
        ephemerals: Iterable[JsonDict],
        to_device_messages: Iterable[JsonDict],
    ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]:
    ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
        """
        Given a list of the events, ephemeral messages and to-device messages,
        - first computes a list of application services users that may have
@@ -367,7 +367,7 @@ class _TransactionController:
        events: List[EventBase],
        ephemeral: Optional[List[JsonDict]] = None,
        to_device_messages: Optional[List[JsonDict]] = None,
        one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
        one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
        unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
        device_list_summary: Optional[DeviceListUpdates] = None,
    ) -> None:
@@ -380,7 +380,7 @@ class _TransactionController:
            events: The persistent events to include in the transaction.
            ephemeral: The ephemeral events to include in the transaction.
            to_device_messages: The to-device messages to include in the transaction.
            one_time_key_counts: Counts of remaining one-time keys for relevant
            one_time_keys_count: Counts of remaining one-time keys for relevant
                appservice devices in the transaction.
            unused_fallback_keys: Lists of unused fallback keys for relevant
                appservice devices in the transaction.
@@ -397,7 +397,7 @@ class _TransactionController:
            events=events,
            ephemeral=ephemeral or [],
            to_device_messages=to_device_messages or [],
            one_time_key_counts=one_time_key_counts or {},
            one_time_keys_count=one_time_keys_count or {},
            unused_fallback_keys=unused_fallback_keys or {},
            device_list_summary=device_list_summary or DeviceListUpdates(),
        )

@@ -16,6 +16,7 @@ from typing import Any, Optional

import attr

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict

@@ -53,9 +54,6 @@ class ExperimentalConfig(Config):
        # MSC3266 (room summary api)
        self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)

        # MSC3030 (Jump to date API endpoint)
        self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)

        # MSC2409 (this setting only relates to optionally sending to-device messages).
        # Presence, typing and read receipt EDUs are already sent to application services that
        # have opted in to receive them. If enabled, this adds to-device messages to that list.
@@ -131,3 +129,10 @@ class ExperimentalConfig(Config):

        # MSC3912: Relation-based redactions.
        self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)

        # MSC1767 and friends: Extensible Events
        self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
        if self.msc1767_enabled:
            # Enable room version (and thus applicable push rules from MSC3931/3932)
            version_id = RoomVersions.MSC1767v10.identifier
            KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10

@@ -317,10 +317,9 @@ def setup_logging(
    Set up the logging subsystem.

    Args:
        config (LoggingConfig | synapse.config.worker.WorkerConfig):
            configuration data
        config: configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
        use_worker_options: True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.

@@ -43,8 +43,6 @@ class MetricsConfig(Config):
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        self.enable_metrics = config.get("enable_metrics", False)

        self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)

        self.report_stats = config.get("report_stats", None)
        self.report_stats_endpoint = config.get(
            "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"

Some files were not shown because too many files have changed in this diff.