
Compare commits


1 commit

Author: David Robertson | SHA1: 5c09f28a30 | Message: dirty script tee hee | Date: 2022-11-23 19:38:06 +00:00
171 changed files with 1459 additions and 3184 deletions


@@ -27,7 +27,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -61,7 +61,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -134,7 +134,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -208,7 +208,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
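Each pair of `uses:` lines above is the removed and added side of a one-line change: the pinned commit of `dtolnay/rust-toolchain` is being swapped. As the comments in the tests workflow further down explain, that action has no versioned releases, so these workflows pin a full commit SHA (and, where a release does exist, record the human-readable version in a trailing comment). A minimal sketch of the convention, with an illustrative step:

```yaml
# Sketch of the pinning convention used throughout these workflows: reference
# a third-party action by an immutable full commit SHA rather than a mutable
# tag or branch.
steps:
  - uses: actions/checkout@v3
  - name: Install Rust
    # No versioned releases exist for this action; pin a specific commit.
    uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
    with:
      toolchain: stable
```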


@@ -1,74 +0,0 @@
# This task does not run complement tests, see tests.yaml instead.
# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead
name: Store complement-synapse image in ghcr.io
on:
push:
branches: [ "master" ]
schedule:
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
branch:
required: true
default: 'develop'
type: choice
options:
- develop
- master
# Only run this action once per pull request/branch; restart if a new commit arrives.
# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
name: Build and push complement image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout specific branch (debug build)
uses: actions/checkout@v3
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
uses: actions/checkout@v3
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
uses: actions/checkout@v3
if: github.event_name == 'push'
with:
ref: master
- name: Login to registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |
type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
type=sha,format=long
- name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
run: scripts-dev/complement.sh --build-only
- name: Tag and push generated image
run: |
for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
echo "tag and push $TAG"
docker tag complement-synapse $TAG
docker push $TAG
done
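The last two steps of this deleted workflow show how the computed tag set reaches the shell: `docker/metadata-action` exposes its results as JSON in `steps.meta.outputs.json`, and the `join(fromJson(...).tags, ' ')` expression flattens the `tags` array into a space-separated word list before the script executes. An illustrative expansion, with hypothetical tag values:

```yaml
# If metadata-action had computed these (hypothetical) tags:
#   { "tags": ["ghcr.io/matrix-org/synapse/complement-synapse:develop",
#              "ghcr.io/matrix-org/synapse/complement-synapse:latest"] }
# then the ${{ ... }} expression is substituted before the step runs, and the
# shell executes:
- name: Tag and push generated image
  run: |
    for TAG in ghcr.io/matrix-org/synapse/complement-synapse:develop ghcr.io/matrix-org/synapse/complement-synapse:latest; do
      echo "tag and push $TAG"
      docker tag complement-synapse $TAG
      docker push $TAG
    done
```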


@@ -27,7 +27,6 @@ jobs:
rust:
- 'rust/**'
- 'Cargo.toml'
- 'Cargo.lock'
check-sampleconfig:
runs-on: ubuntu-latest
@@ -103,35 +102,13 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy -- -D warnings
# We also lint against a nightly rustc so that we can lint the benchmark
# suite, which requires a nightly compiler.
lint-clippy-nightly:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v3
- name: Install Rust
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: nightly-2022-12-01
components: clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --all-features -- -D warnings
- run: cargo clippy
lint-rustfmt:
runs-on: ubuntu-latest
@@ -145,7 +122,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
components: rustfmt
@@ -207,7 +184,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -251,7 +228,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -369,7 +346,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -512,7 +489,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2
@@ -540,7 +517,7 @@ jobs:
# There don't seem to be versioned releases of this action per se: for each rust
# version there is a branch which gets constantly rebased on top of master.
# We pin to a specific commit for paranoia's sake.
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: 1.58.1
- uses: Swatinem/rust-cache@v2


@@ -18,7 +18,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -43,7 +43,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -82,7 +82,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
with:
toolchain: stable
- uses: Swatinem/rust-cache@v2
@@ -174,7 +174,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:


@@ -1,77 +1,3 @@
Synapse 1.73.0rc2 (2022-12-01)
==============================
Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
Bugfixes
--------
- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
Synapse 1.73.0rc1 (2022-11-29)
==============================
Features
--------
- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
- Adds support for handling avatar in SSO login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
Bugfixes
--------
- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
Improved Documentation
----------------------
- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
Deprecations and Removals
-------------------------
- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
Internal Changes
----------------
- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496))
([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
Synapse 1.72.0 (2022-11-22)
===========================

Cargo.lock (generated)

@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.148"
version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.148"
version = "1.0.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
dependencies = [
"proc-macro2",
"quote",
@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.89"
version = "1.0.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
checksum = "8e8b3801309262e8184d9687fb697586833e939767aea0dda89f5a8e650e8bd7"
dependencies = [
"itoa",
"ryu",
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.104"
version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
dependencies = [
"proc-macro2",
"quote",


@@ -3,7 +3,3 @@
[workspace]
members = ["rust"]
[profile.dbgrelease]
inherits = "release"
debug = true
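The deleted stanza defined a custom Cargo profile, `dbgrelease`, inheriting the stock `release` profile's optimisations while keeping debug symbols. With that definition in place, a build would select it via `cargo`'s `--profile` flag; a hypothetical invocation for this workspace:

```yaml
# Hypothetical CI step (not part of this diff): build the Rust crate with the
# dbgrelease profile, i.e. release optimisations plus debug info.
- name: Build with debug-enabled release profile
  run: cargo build --profile dbgrelease --manifest-path rust/Cargo.toml
```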

changelog.d/14055.misc

@@ -0,0 +1 @@
Add missing type hints to `HomeServer`.


@@ -1 +0,0 @@
Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar).

changelog.d/14376.misc

@@ -0,0 +1 @@
Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).

changelog.d/14393.bugfix

@@ -0,0 +1 @@
Fix a bug introduced in 1.58.0 where a user with presence state 'org.matrix.msc3026.busy' would mistakenly be set to 'online' when calling `/sync` or `/events` on a worker process.

changelog.d/14400.misc

@@ -0,0 +1 @@
Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.

changelog.d/14403.misc

@@ -0,0 +1 @@
Faster joins: do not wait for full state when creating events to send.

changelog.d/14404.misc

@@ -0,0 +1 @@
Faster joins: filter out non local events when a room doesn't have its full state.

changelog.d/14412.misc

@@ -0,0 +1 @@
Remove duplicated type information from type hints.

changelog.d/14449.misc

@@ -0,0 +1 @@
Fix type logic in TCP replication code that prevented correctly ignoring blank commands.

changelog.d/14452.misc

@@ -0,0 +1 @@
Enable mypy's [`strict_equality` check](https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-strict-equality) by default.

changelog.d/14468.misc

@@ -0,0 +1 @@
Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).

changelog.d/14476.misc

@@ -0,0 +1 @@
Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.

changelog.d/14479.misc

@@ -0,0 +1 @@
`scripts-dev/federation_client`: Fix routing on servers with `.well-known` files.

changelog.d/14487.misc

@@ -0,0 +1 @@
Reduce default third party invite rate limit to 216 invites per day.


@@ -1 +0,0 @@
Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`.

changelog.d/14490.misc

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 0.9 where it would fail to fetch server keys whose IDs contain a forward slash.


@@ -0,0 +1 @@
Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.4/client-server-api/#aggregations) which return bundled aggregations.


@@ -1 +0,0 @@
Update worker settings for `pusher` and `federation_sender` functionality.

changelog.d/14496.misc

@@ -0,0 +1 @@
Refactor `federation_sender` and `pusher` configuration loading.

changelog.d/14499.doc

@@ -0,0 +1 @@
Fixed link to 'Synapse administration endpoints'.

changelog.d/14500.misc

@@ -0,0 +1 @@
Bump pygithub from 1.56 to 1.57.

changelog.d/14501.misc

@@ -0,0 +1 @@
Bump sentry-sdk from 1.10.1 to 1.11.0.

changelog.d/14502.misc

@@ -0,0 +1 @@
Bump types-pillow from 9.2.2.1 to 9.3.0.1.

changelog.d/14503.misc

@@ -0,0 +1 @@
Bump towncrier from 21.9.0 to 22.8.0.

changelog.d/14504.misc

@@ -0,0 +1 @@
Bump phonenumbers from 8.12.56 to 8.13.0.

changelog.d/14505.misc

@@ -0,0 +1 @@
Bump serde_json from 1.0.87 to 1.0.88.


@@ -0,0 +1 @@
Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.4/client-server-api/#aggregations) which return bundled aggregations.


@@ -0,0 +1 @@
Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.4/client-server-api/#aggregations) which return bundled aggregations.

changelog.d/14516.misc

@@ -0,0 +1 @@
Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row.


@@ -1 +0,0 @@
Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages.

changelog.d/14522.misc

@@ -0,0 +1 @@
Add more prompts to the bug report form.


@@ -1 +0,0 @@
Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`.

changelog.d/14526.misc

@@ -0,0 +1 @@
Extend editorconfig rules on indent and line length to `.pyi` files.

changelog.d/14527.misc

@@ -0,0 +1 @@
Speed-up `/messages` with `filter_events_for_client` optimizations.


@@ -1 +0,0 @@
Share the `ClientRestResource` for both workers and the main process.

changelog.d/14534.misc

@@ -0,0 +1 @@
Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`.


@@ -1 +0,0 @@
Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room.


@@ -1 +0,0 @@
Add new `push.enabled` config option to allow opting out of push notification calculation.


@@ -1 +0,0 @@
Modernize unit tests configuration related to workers.


@@ -1 +0,0 @@
Advertise support for Matrix 1.5 on `/_matrix/client/versions`.


@@ -1 +0,0 @@
Bump jsonschema from 4.17.0 to 4.17.3.


@@ -1 +0,0 @@
Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances.


@@ -1 +0,0 @@
Fix Rust lint CI.


@@ -1 +0,0 @@
Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1.

debian/changelog (vendored)

@@ -1,15 +1,3 @@
matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium
* New Synapse release 1.73.0rc2.
-- Synapse Packaging team <packages@matrix.org> Thu, 01 Dec 2022 10:02:19 +0000
matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
* New Synapse release 1.73.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Nov 2022 12:28:13 +0000
matrix-synapse-py3 (1.72.0) stable; urgency=medium
* New Synapse release 1.72.0.


@@ -100,6 +100,8 @@ experimental_features:
# client-side support for partial state in /send_join responses
faster_joins: true
{% endif %}
# Enable jump to date endpoint
msc3030_enabled: true
# Filtering /messages by relation type.
msc3874_enabled: true


@@ -140,7 +140,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
"^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
],
"shared_extra_conf": {},
@@ -164,7 +163,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/federation/(v1|v2)/invite/",
"^/_matrix/federation/(v1|v2)/query_auth/",
"^/_matrix/federation/(v1|v2)/event_auth/",
"^/_matrix/federation/v1/timestamp_to_event/",
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
"^/_matrix/federation/(v1|v2)/user/devices/",
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",


@@ -84,9 +84,7 @@ file when you upgrade the Debian package to a later version.
##### Downstream Debian packages
Andrej Shadura maintains a
[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
the Debian repositories.
Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
For `bookworm` and `sid`, it can be installed simply with:
```sh
@@ -102,27 +100,23 @@ for information on how to use backports.
##### Downstream Ubuntu packages
We do not recommend using the packages in the default Ubuntu repository
at this time, as they are [old and suffer from known security vulnerabilities](
https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
).
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
#### Fedora
Synapse is in the Fedora repositories as
[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):
Synapse is in the Fedora repositories as `matrix-synapse`:
```sh
sudo dnf install matrix-synapse
```
Additionally, Oleg Girko provides Fedora RPMs at
Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
#### OpenSUSE
Synapse is in the OpenSUSE repositories as
[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
```sh
sudo zypper install matrix-synapse
@@ -157,8 +151,7 @@ sudo pip install py-bcrypt
#### Void Linux
Synapse can be found in the void repositories as
['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):
Synapse can be found in the void repositories as 'synapse':
```sh
xbps-install -Su


@@ -88,28 +88,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.73.0
## Legacy Prometheus metric names have now been removed
Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
and offered an option to disable them.
Synapse v1.71.0 disabled legacy Prometheus metric names by default.
This version, v1.73.0, removes those legacy Prometheus metric names entirely.
This also means that the `enable_legacy_metrics` configuration option has been
removed; it will no longer be possible to re-enable the legacy metric names.
If you use metrics and have not yet updated your Grafana dashboard(s),
Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
to this version.
Note that the included Grafana dashboard was updated in v1.72.0 to correct some
metric names which were missed when legacy metrics were disabled by default.
See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
for more context.
# Upgrading to v1.72.0
## Dropping support for PostgreSQL 10


@@ -858,7 +858,7 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.
The message retention policies feature is disabled by default. Please be advised
The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history but in general caution is advised when enabling this
@@ -2437,6 +2437,31 @@ Example configuration:
enable_metrics: true
```
---
### `enable_legacy_metrics`
Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `false`. Has no effect if `enable_metrics` is `false`.
**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.
Example configuration:
```yaml
enable_legacy_metrics: false
```
See https://github.com/matrix-org/synapse/issues/11106 for context.
*Since v1.67.0.*
**Will be removed in v1.73.0.**
---
### `sentry`
Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2968,17 +2993,10 @@ Options for each entry include:
For the default provider, the following settings are available:
* `subject_claim`: name of the claim containing a unique identifier
* subject_claim: name of the claim containing a unique identifier
for the user. Defaults to 'sub', which OpenID Connect
compliant providers should provide.
* `picture_claim`: name of the claim containing an url for the user's profile picture.
Defaults to 'picture', which OpenID Connect compliant providers should provide
and has to refer to a direct image file such as PNG, JPEG, or GIF image file.
Currently only supported in monolithic (single-process) server configurations
where the media repository runs within the Synapse process.
* `localpart_template`: Jinja2 template for the localpart of the MXID.
If this is not set, the user will be prompted to choose their
own username (see the documentation for the `sso_auth_account_details.html`
@@ -3003,7 +3021,7 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
Defaults to `false`.
@@ -3355,10 +3373,6 @@ Configuration settings related to push notifications
This setting defines options for push notifications.
This option has a number of sub-options. They are as follows:
* `enable_push`: Enables or disables push notification calculation. Note, disabling this will also
stop unread counts being calculated for rooms. This mode of operation is intended
for homeservers which may only have bots or appservice users connected, or are otherwise
not interested in push/unread counters. This is enabled by default.
* `include_content`: Clients requesting push notifications can either have the body of
the message sent in the notification poke along with other details
like the sender, or just the event ID and room ID (`event_id_only`).
@@ -3379,7 +3393,6 @@ This option has a number of sub-options. They are as follows:
Example configuration:
```yaml
push:
enable_push: true
include_content: false
group_unread_count_by_room: false
```
@@ -3425,7 +3438,7 @@ This option has the following sub-options:
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
These indexes are built the first time Synapse starts; admins can
manually trigger a rebuild via the API following the instructions
[for running background updates](../administration/admin_api/background_updates.md#run),
@@ -3684,7 +3697,7 @@ As a result, the worker configuration is divided into two parts.
1. The first part (in this section of the manual) defines which shardable tasks
are delegated to privileged workers. This allows unprivileged workers to make
requests to a privileged worker to act on their behalf.
request a privileged worker to act on their behalf.
1. [The second part](#individual-worker-configuration)
controls the behaviour of individual workers in isolation.
@@ -3696,7 +3709,7 @@ For guidance on setting up workers, see the [worker documentation](../../workers
A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.
The default, this value is omitted (equivalently `null`), which means that
The default, this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.
Example configuration:
@@ -3706,8 +3719,6 @@ worker_replication_secret: "secret_secret"
---
### `start_pushers`
Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
@@ -3718,30 +3729,25 @@ start_pushers: false
---
### `pusher_instances`
It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal)
and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to
a `pusher_instances` map. Doing so will remove handling of this function from the main
process. Multiple workers can be added to this map, in which case the work is balanced
across them. Ensure the main process and all pusher workers are restarted after changing
this option.
It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
in which case the work is balanced across them. Use this setting to list the pushers by
[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
restarted after changing this option.
Example configuration for a single worker:
```yaml
pusher_instances:
- pusher_worker1
```
And for multiple workers:
If no or only one pusher worker is configured, this setting is not necessary.
The main process will send out push notifications by default if you do not disable
it by setting [`start_pushers: false`](#start_pushers).
Example configuration:
```yaml
start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
```
---
### `send_federation`
Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
Controls sending of outbound federation transactions on the main process.
Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
Defaults to `true`.
@@ -3753,36 +3759,29 @@ send_federation: false
---
### `federation_sender_instances`
It is possible to scale the processes that handle sending outbound federation requests
by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to
a `federation_sender_instances` map. Doing so will remove handling of this function from
the main process. Multiple workers can be added to this map, in which case the work is
balanced across them.
It is possible to run multiple
[federation sender worker](../../workers.md#synapseappfederation_sender), in which
case the work is balanced across them. Use this setting to list the senders.
This configuration setting must be shared between all workers handling federation
sending, and if changed all federation sender workers must be stopped at the same time
and then started, to ensure that all instances are running with the same config (otherwise
This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
started, to ensure that all instances are running with the same config (otherwise
events may be dropped).
Example configuration for a single worker:
Example configuration:
```yaml
send_federation: false
federation_sender_instances:
- federation_sender1
```
And for multiple workers:
```yaml
federation_sender_instances:
- federation_sender1
- federation_sender2
```
---
### `instance_map`
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
(The main process also needs an HTTP replication listener, but it should not be
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)
Example configuration:
@@ -3916,8 +3915,8 @@ worker_replication_http_tls: true
---
### `worker_listeners`
A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.
Workers declared in [`stream_writers`](#stream_writers) will need to include a
@@ -3936,7 +3935,7 @@ worker_listeners:
### `worker_daemonize`
Specifies whether the worker should be started as a daemon process.
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.
Defaults to `false`.
@@ -3948,11 +3947,11 @@ worker_daemonize: true
---
### `worker_pid_file`
When running a worker as a daemon, we need a place to store the
When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".
This option is required if `worker_daemonize` is `true` and ignored
This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.
See also the [`pid_file` option](#pid_file) option for the main Synapse process.
@@ -4002,3 +4001,4 @@ background_updates:
min_batch_size: 10
default_batch_size: 50
```
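The `instance_map` hunk above ends before the option's example configuration; a hedged sketch of the shape it describes, with an illustrative worker name, host and port:

```yaml
# Hedged sketch (values illustrative): one stream writer's HTTP replication
# listener, keyed by its worker_name. The main process is deliberately absent.
instance_map:
  event_persister1:
    host: 127.0.0.1
    port: 9091
```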


@@ -191,7 +191,6 @@ information.
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/timestamp_to_event/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/key/v2/query
@@ -219,7 +218,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/v1/rooms/.*/timestamp_to_event$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
# Encryption requests
@@ -505,9 +503,6 @@ worker application type.
### `synapse.app.pusher`
It is likely this option will be deprecated in the future and is not recommended for new
installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).
Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
@@ -546,9 +541,6 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
It is likely this option will be deprecated in the future and not recommended for
new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).
Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
@@ -645,9 +637,7 @@ equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
* `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
* `synapse.app.pusher`
* `synapse.app.synchrotron`
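The removed paragraphs steer new deployments away from the dedicated `synapse.app.pusher` and `synapse.app.federation_sender` apps towards a `synapse.app.generic_worker` listed under `pusher_instances` or `federation_sender_instances`. A hedged sketch of the worker-side half of that setup, with an illustrative worker name and port:

```yaml
# Hedged sketch of a generic_worker acting as a pusher, per the removed
# guidance. worker_name must match an entry in the main process's
# pusher_instances list; host and port values are illustrative.
worker_app: synapse.app.generic_worker
worker_name: pusher_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
```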


@@ -59,6 +59,16 @@ exclude = (?x)
|tests/server_notices/test_resource_limits_server_notices.py
|tests/test_state.py
|tests/test_terms_auth.py
|tests/util/test_async_helpers.py
|tests/util/test_batching_queue.py
|tests/util/test_dict_cache.py
|tests/util/test_expiring_cache.py
|tests/util/test_file_consumer.py
|tests/util/test_linearizer.py
|tests/util/test_logcontext.py
|tests/util/test_lrucache.py
|tests/util/test_rwlock.py
|tests/util/test_wheel_timer.py
)$
[mypy-synapse.federation.transport.client]
@@ -109,9 +119,6 @@ disallow_untyped_defs = True
[mypy-tests.storage.test_profile]
disallow_untyped_defs = True
[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True
@@ -127,12 +134,10 @@ disallow_untyped_defs = True
[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
[mypy-tests.util.*]
disallow_untyped_defs = True
[mypy-tests.utils]
disallow_untyped_defs = True
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
;; The `typeshed` project maintains stubs here:

poetry.lock (generated)

@@ -452,7 +452,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
version = "4.17.3"
version = "4.17.0"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -888,17 +888,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
version = "22.1.0"
version = "22.0.0"
description = "Python wrapper module around the OpenSSL library"
category = "main"
optional = false
python-versions = ">=3.6"
[package.dependencies]
cryptography = ">=38.0.0,<39"
cryptography = ">=35.0"
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
docs = ["sphinx", "sphinx-rtd-theme"]
test = ["flaky", "pretend", "pytest (>=3.0.1)"]
[[package]]
@@ -1076,7 +1076,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "1.11.1"
version = "1.11.0"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1380,7 +1380,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-bleach"
version = "5.0.3.1"
version = "5.0.3"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@@ -1448,7 +1448,7 @@ python-versions = "*"
[[package]]
name = "types-psycopg2"
version = "2.9.21.2"
version = "2.9.21.1"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
@@ -2013,8 +2013,8 @@ jinja2 = [
{file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
]
jsonschema = [
{file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
{file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
{file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"},
{file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"},
]
keyring = [
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
@@ -2452,8 +2452,8 @@ pynacl = [
{file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
]
pyopenssl = [
{file = "pyOpenSSL-22.1.0-py3-none-any.whl", hash = "sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e"},
{file = "pyOpenSSL-22.1.0.tar.gz", hash = "sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968"},
{file = "pyOpenSSL-22.0.0-py2.py3-none-any.whl", hash = "sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0"},
{file = "pyOpenSSL-22.0.0.tar.gz", hash = "sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf"},
]
pyparsing = [
{file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
@@ -2569,8 +2569,8 @@ semantic-version = [
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
{file = "sentry-sdk-1.11.1.tar.gz", hash = "sha256:675f6279b6bb1fea09fd61751061f9a90dca3b5929ef631dd50dc8b3aeb245e9"},
{file = "sentry_sdk-1.11.1-py2.py3-none-any.whl", hash = "sha256:8b4ff696c0bdcceb3f70bbb87a57ba84fd3168b1332d493fcd16c137f709578c"},
{file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"},
{file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2781,8 +2781,8 @@ typed-ast = [
{file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
]
types-bleach = [
{file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"},
{file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"},
{file = "types-bleach-5.0.3.tar.gz", hash = "sha256:f7b3df8278efe176d9670d0f063a66c866c77577f71f54b9c7a320e31b1a7bbd"},
{file = "types_bleach-5.0.3-py3-none-any.whl", hash = "sha256:5931525d03571f36b2bb40210c34b662c4d26c8fd6f2b1e1e83fe4d2d2fd63c7"},
]
types-commonmark = [
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
@@ -2813,8 +2813,8 @@ types-pillow = [
{file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
]
types-psycopg2 = [
{file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"},
{file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"},
{file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
{file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"},
]
types-pyopenssl = [
{file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},


@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.73.0rc2"
version = "1.72.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"


@@ -33,12 +33,10 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
0,
Default::default(),
Default::default(),
true,
vec![],
false,
)
.unwrap();
@@ -69,12 +67,10 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
0,
Default::default(),
Default::default(),
true,
vec![],
false,
)
.unwrap();
@@ -105,12 +101,10 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
0,
Default::default(),
Default::default(),
true,
vec![],
false,
)
.unwrap();
@@ -141,12 +135,10 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
0,
Default::default(),
Default::default(),
true,
vec![],
false,
)
.unwrap();


@@ -1,77 +0,0 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
use synapse::tree_cache::TreeCache;
use test::Bencher;
extern crate test;
#[bench]
fn bench_tree_cache_get_non_empty(b: &mut Bencher) {
let mut cache: TreeCache<&str, &str> = TreeCache::new();
cache.set(["a", "b", "c", "d"], "f").unwrap();
b.iter(|| cache.get(&["a", "b", "c", "d"]));
}
#[bench]
fn bench_tree_cache_get_empty(b: &mut Bencher) {
let cache: TreeCache<&str, &str> = TreeCache::new();
b.iter(|| cache.get(&["a", "b", "c", "d"]));
}
#[bench]
fn bench_tree_cache_set(b: &mut Bencher) {
let mut cache: TreeCache<&str, &str> = TreeCache::new();
b.iter(|| cache.set(["a", "b", "c", "d"], "f").unwrap());
}
#[bench]
fn bench_tree_cache_length(b: &mut Bencher) {
let mut cache: TreeCache<u32, u32> = TreeCache::new();
for c1 in 0..=10 {
for c2 in 0..=10 {
for c3 in 0..=10 {
for c4 in 0..=10 {
cache.set([c1, c2, c3, c4], 1).unwrap()
}
}
}
}
b.iter(|| cache.len());
}
#[bench]
fn tree_cache_iterate(b: &mut Bencher) {
let mut cache: TreeCache<u32, u32> = TreeCache::new();
for c1 in 0..=10 {
for c2 in 0..=10 {
for c3 in 0..=10 {
for c4 in 0..=10 {
cache.set([c1, c2, c3, c4], 1).unwrap()
}
}
}
}
b.iter(|| cache.items().count());
}


@@ -1,7 +1,6 @@
use pyo3::prelude::*;
pub mod push;
pub mod tree_cache;
/// Returns the hash of all the rust source files at the time it was compiled.
///
@@ -27,7 +26,6 @@ fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
push::register_module(py, m)?;
tree_cache::binding::register_module(py, m)?;
Ok(())
}


@@ -274,156 +274,6 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed(
"global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
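// For reference, a rule like the one above serialises over the client API
// roughly as follows (illustrative sketch, not normative):
// {
//   "rule_id": ".org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
//   "default": true,
//   "enabled": true,
//   "conditions": [
//     {"kind": "event_match", "key": "type", "pattern": "org.matrix.msc1767.audio"},
//     {"kind": "room_member_count", "is": "2"},
//     {"kind": "org.matrix.msc3931.room_version_supports",
//      "feature": "org.matrix.msc3932.extensible_events"}
//   ],
//   "actions": ["notify",
//               {"set_tweak": "sound", "value": "default"},
//               {"set_tweak": "highlight", "value": false}]
// }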
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
priority_class: 1,
@@ -452,126 +302,6 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.encrypted")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.message")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.file")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.image")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.video")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
pattern: Some(Cow::Borrowed("m.audio")),
pattern_type: None,
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
// RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
priority_class: 1,

View File

@@ -29,33 +29,6 @@ use super::{
lazy_static! {
/// Used to parse the `is` clause in the room member count condition.
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
/// Used to determine which MSC3931 room version feature flags are actually known to
/// the push evaluator.
static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
];
/// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
/// declare Extensible Events support ultimately *disable* push rules which do not declare
/// *any* MSC3931 room_version_supports condition).
static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
"global/override/.m.rule.master".to_string(),
"global/override/.m.rule.roomnotif".to_string(),
"global/content/.m.rule.contains_user_name".to_string(),
];
}
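For intuition, INEQUALITY_EXPR splits an optional comparator prefix from a count. A rough Python equivalent of the parse (illustrative only, not part of this module):

import re

# Mirrors INEQUALITY_EXPR: an optional run of comparator characters, then digits.
INEQUALITY_EXPR = re.compile(r"^([=<>]*)([0-9]+)$")

assert INEQUALITY_EXPR.match(">=2").groups() == (">=", "2")
assert INEQUALITY_EXPR.match("2").groups() == ("", "2")  # bare count means equality
assert INEQUALITY_EXPR.match("two") is None  # non-numeric counts are rejected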
enum RoomVersionFeatures {
ExtensibleEvents,
}
impl RoomVersionFeatures {
fn as_str(&self) -> &'static str {
match self {
RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
}
}
}
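Taken together, KNOWN_RVER_FLAGS, the safe list, and the per-rule condition scan implement MSC3932's gating. A minimal Python sketch of that predicate (hypothetical names; the real logic lives in run() below):

def rule_is_enabled(rule_id, conditions, room_features, safe_ids):
    # In rooms that declare extensible-events support, a push rule stays
    # enabled only if it is on the safe list or carries at least one
    # MSC3931 room_version_supports condition.
    supports_extev = "org.matrix.msc3932.extensible_events" in room_features
    has_rver_cond = any(
        c.get("kind") == "org.matrix.msc3931.room_version_supports"
        for c in conditions
    )
    return (not supports_extev) or has_rver_cond or (rule_id in safe_ids)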
/// Allows running a set of push rules against a particular event.
@@ -84,19 +57,11 @@ pub struct PushRuleEvaluator {
/// If msc3664, push rules for related events, is enabled.
related_event_match_enabled: bool,
/// If MSC3931 is applicable, the feature flags for the room version.
room_version_feature_flags: Vec<String>,
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
}
#[pymethods]
impl PushRuleEvaluator {
/// Create a new `PushRuleEvaluator`. See struct docstring for details.
#[allow(clippy::too_many_arguments)]
#[new]
pub fn py_new(
flattened_keys: BTreeMap<String, String>,
@@ -105,8 +70,6 @@ impl PushRuleEvaluator {
notification_power_levels: BTreeMap<String, i64>,
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
) -> Result<Self, Error> {
let body = flattened_keys
.get("content.body")
@@ -121,8 +84,6 @@ impl PushRuleEvaluator {
sender_power_level,
related_events_flattened,
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
})
}
@@ -145,19 +106,7 @@ impl PushRuleEvaluator {
continue;
}
let rule_id = &push_rule.rule_id().to_string();
let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
let mut has_rver_condition = false;
for condition in push_rule.conditions.iter() {
has_rver_condition |= matches!(
condition,
// per MSC3932, we just need *any* room version condition to match
Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
);
match self.match_condition(condition, user_id, display_name) {
Ok(true) => {}
Ok(false) => continue 'outer,
@@ -168,13 +117,6 @@ impl PushRuleEvaluator {
}
}
// MSC3932: Disable push rules in extensible event-supporting room versions if they
// don't describe *any* MSC3931 room version condition, unless the rule is on the
// safe list.
if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
continue;
}
let actions = push_rule
.actions
.iter()
@@ -262,15 +204,6 @@ impl PushRuleEvaluator {
false
}
}
KnownCondition::RoomVersionSupports { feature } => {
if !self.msc3931_enabled {
false
} else {
let flag = feature.to_string();
KNOWN_RVER_FLAGS.contains(&flag)
&& self.room_version_feature_flags.contains(&flag)
}
}
};
Ok(result)
@@ -429,63 +362,9 @@ fn push_rule_evaluator() {
BTreeMap::new(),
BTreeMap::new(),
true,
vec![],
true,
)
.unwrap();
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
assert_eq!(result.len(), 3);
}
#[test]
fn test_requires_room_version_supports_condition() {
use std::borrow::Cow;
use crate::push::{PushRule, PushRules};
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
BTreeMap::new(),
BTreeMap::new(),
false,
flags,
true,
)
.unwrap();
// first test: are the master and contains_user_name rules excluded from the "requires room
// version condition" check?
let mut result = evaluator.run(
&FilteredPushRules::default(),
Some("@bob:example.org"),
None,
);
assert_eq!(result.len(), 3);
// second test: if an appropriate push rule is in play, does it get handled?
let custom_rule = PushRule {
rule_id: Cow::from("global/underride/.org.example.extensible"),
priority_class: 1, // underride
conditions: Cow::from(vec![Condition::Known(
KnownCondition::RoomVersionSupports {
feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
},
)]),
actions: Cow::from(vec![Action::Notify]),
default: false,
default_enabled: true,
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
None,
None,
);
assert_eq!(result.len(), 1);
}

View File

@@ -277,10 +277,6 @@ pub enum KnownCondition {
SenderNotificationPermission {
key: Cow<'static, str>,
},
#[serde(rename = "org.matrix.msc3931.room_version_supports")]
RoomVersionSupports {
feature: Cow<'static, str>,
},
}
impl IntoPy<PyObject> for Condition {
@@ -412,7 +408,6 @@ pub struct FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc3664_enabled: bool,
msc1767_enabled: bool,
}
#[pymethods]
@@ -422,13 +417,11 @@ impl FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc3664_enabled: bool,
msc1767_enabled: bool,
) -> Self {
Self {
push_rules,
enabled_map,
msc3664_enabled,
msc1767_enabled,
}
}
@@ -453,10 +446,6 @@ impl FilteredPushRules {
return false;
}
if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
return false;
}
true
})
.map(|r| {
@@ -502,18 +491,6 @@ fn test_deserialize_unstable_msc3664_condition() {
));
}
#[test]
fn test_deserialize_unstable_msc3931_condition() {
let json =
r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
));
}
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;

View File

@@ -1,247 +0,0 @@
use std::hash::Hash;
use anyhow::Error;
use pyo3::{
pyclass, pymethods,
types::{PyModule, PyTuple},
IntoPy, PyAny, PyObject, PyResult, Python, ToPyObject,
};
use super::TreeCache;
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
let child_module = PyModule::new(py, "tree_cache")?;
child_module.add_class::<PythonTreeCache>()?;
child_module.add_class::<StringTreeCache>()?;
m.add_submodule(child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import push` work.
py.import("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.tree_cache", child_module)?;
Ok(())
}
#[derive(Clone)]
struct HashablePyObject {
obj: PyObject,
hash: isize,
}
impl HashablePyObject {
pub fn new(obj: &PyAny) -> Result<Self, Error> {
let hash = obj.hash()?;
Ok(HashablePyObject {
obj: obj.to_object(obj.py()),
hash,
})
}
}
impl IntoPy<PyObject> for HashablePyObject {
fn into_py(self, _: Python<'_>) -> PyObject {
self.obj.clone()
}
}
impl IntoPy<PyObject> for &HashablePyObject {
fn into_py(self, _: Python<'_>) -> PyObject {
self.obj.clone()
}
}
impl ToPyObject for HashablePyObject {
fn to_object(&self, _py: Python<'_>) -> PyObject {
self.obj.clone()
}
}
impl Hash for HashablePyObject {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.hash.hash(state);
}
}
impl PartialEq for HashablePyObject {
fn eq(&self, other: &Self) -> bool {
let equal = Python::with_gil(|py| {
let result = self.obj.as_ref(py).eq(other.obj.as_ref(py));
result.unwrap_or(false)
});
equal
}
}
impl Eq for HashablePyObject {}
#[pyclass]
struct PythonTreeCache(TreeCache<HashablePyObject, PyObject>);
#[pymethods]
impl PythonTreeCache {
#[new]
fn new() -> Self {
PythonTreeCache(Default::default())
}
pub fn set(&mut self, key: &PyAny, value: PyObject) -> Result<(), Error> {
let v: Vec<HashablePyObject> = key
.iter()?
.map(|obj| HashablePyObject::new(obj?))
.collect::<Result<_, _>>()?;
self.0.set(v, value)?;
Ok(())
}
pub fn get_node<'a>(
&'a self,
py: Python<'a>,
key: &'a PyAny,
) -> Result<Option<Vec<(&'a PyTuple, &'a PyObject)>>, Error> {
let v: Vec<HashablePyObject> = key
.iter()?
.map(|obj| HashablePyObject::new(obj?))
.collect::<Result<_, _>>()?;
let Some(node) = self.0.get_node(v.clone())? else {
return Ok(None)
};
let items = node
.items()
.map(|(k, value)| {
let vec = v.iter().chain(k.iter().map(|a| *a)).collect::<Vec<_>>();
let nk = PyTuple::new(py, vec);
(nk, value)
})
.collect::<Vec<_>>();
Ok(Some(items))
}
pub fn get(&self, key: &PyAny) -> Result<Option<&PyObject>, Error> {
let v: Vec<HashablePyObject> = key
.iter()?
.map(|obj| HashablePyObject::new(obj?))
.collect::<Result<_, _>>()?;
Ok(self.0.get(&v)?)
}
pub fn pop_node<'a>(
&'a mut self,
py: Python<'a>,
key: &'a PyAny,
) -> Result<Option<Vec<(&'a PyTuple, PyObject)>>, Error> {
let v: Vec<HashablePyObject> = key
.iter()?
.map(|obj| HashablePyObject::new(obj?))
.collect::<Result<_, _>>()?;
let Some(node) = self.0.pop_node(v.clone())? else {
return Ok(None)
};
let items = node
.into_items()
.map(|(k, value)| {
let vec = v.iter().chain(k.iter()).collect::<Vec<_>>();
let nk = PyTuple::new(py, vec);
(nk, value)
})
.collect::<Vec<_>>();
Ok(Some(items))
}
pub fn pop(&mut self, key: &PyAny) -> Result<Option<PyObject>, Error> {
let v: Vec<HashablePyObject> = key
.iter()?
.map(|obj| HashablePyObject::new(obj?))
.collect::<Result<_, _>>()?;
Ok(self.0.pop(&v)?)
}
pub fn clear(&mut self) {
self.0.clear()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn values(&self) -> Vec<&PyObject> {
self.0.values().collect()
}
pub fn items(&self) -> Vec<(Vec<&HashablePyObject>, &PyObject)> {
todo!()
}
}
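Assuming the bindings register under synapse.synapse_rust.tree_cache as register_module() above suggests, Python-side usage might look like this sketch (illustrative, not a shipped test):

from synapse.synapse_rust.tree_cache import PythonTreeCache

cache = PythonTreeCache()
cache.set(("!room:a", "@user:a"), "cached value")
assert cache.get(("!room:a", "@user:a")) == "cached value"

# pop_node() evicts an entire subtree in one call: every entry keyed under
# ("!room:a", ...) comes back as (full_key_tuple, value) pairs.
evicted = cache.pop_node(("!room:a",))
assert cache.len() == 0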
#[pyclass]
struct StringTreeCache(TreeCache<String, String>);
#[pymethods]
impl StringTreeCache {
#[new]
fn new() -> Self {
StringTreeCache(Default::default())
}
pub fn set(&mut self, key: &PyAny, value: String) -> Result<(), Error> {
let key = key
.iter()?
.map(|o| o.expect("iter failed").extract().expect("not a string"));
self.0.set(key, value)?;
Ok(())
}
// pub fn get_node(&self, key: &PyAny) -> Result<Option<&TreeCacheNode<K, PyObject>>, Error> {
// todo!()
// }
pub fn get(&self, key: &PyAny) -> Result<Option<&String>, Error> {
let key = key.iter()?.map(|o| {
o.expect("iter failed")
.extract::<String>()
.expect("not a string")
});
Ok(self.0.get(key)?)
}
// pub fn pop_node(&mut self, key: &PyAny) -> Result<Option<TreeCacheNode<K, PyObject>>, Error> {
// todo!()
// }
pub fn pop(&mut self, key: Vec<String>) -> Result<Option<String>, Error> {
Ok(self.0.pop(&key)?)
}
pub fn clear(&mut self) {
self.0.clear()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn values(&self) -> Vec<&String> {
self.0.values().collect()
}
// Keys and values are strings here, unlike PythonTreeCache above.
pub fn items(&self) -> Vec<(Vec<&String>, &String)> {
todo!()
}
}

View File

@@ -1,421 +0,0 @@
use std::{borrow::Borrow, collections::HashMap, hash::Hash};
use anyhow::{bail, Error};
pub mod binding;
pub enum TreeCacheNode<K, V> {
Leaf(V),
Branch(usize, HashMap<K, TreeCacheNode<K, V>>),
}
impl<K, V> TreeCacheNode<K, V> {
pub fn new_branch() -> Self {
TreeCacheNode::Branch(0, Default::default())
}
fn len(&self) -> usize {
match self {
TreeCacheNode::Leaf(_) => 1,
TreeCacheNode::Branch(size, _) => *size,
}
}
}
impl<'a, K: Eq + Hash + 'a, V> TreeCacheNode<K, V> {
pub fn set(
&mut self,
mut key: impl Iterator<Item = K>,
value: V,
) -> Result<(usize, usize), Error> {
if let Some(k) = key.next() {
match self {
TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
TreeCacheNode::Branch(size, map) => {
let node = map.entry(k).or_insert_with(TreeCacheNode::new_branch);
let (added, removed) = node.set(key, value)?;
*size += added;
*size -= removed;
Ok((added, removed))
}
}
} else {
// Replacing an existing branch with a leaf adds one entry and removes
// every leaf beneath that branch (its recorded size, not just its
// direct children).
let added = if let TreeCacheNode::Branch(size, _) = self {
(1, *size)
} else {
(0, 0)
};
*self = TreeCacheNode::Leaf(value);
Ok(added)
}
}
pub fn pop<Q>(
&mut self,
current_key: Q,
mut next_keys: impl Iterator<Item = Q>,
) -> Result<Option<TreeCacheNode<K, V>>, Error>
where
Q: Borrow<K>,
Q: Hash + Eq + 'a,
{
if let Some(next_key) = next_keys.next() {
match self {
TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
TreeCacheNode::Branch(size, map) => {
let node = if let Some(node) = map.get_mut(current_key.borrow()) {
node
} else {
return Ok(None);
};
if let Some(popped) = node.pop(next_key, next_keys)? {
// Subtract the size of the subtree that was popped, not the
// child's remaining size.
*size -= popped.len();
Ok(Some(popped))
} else {
Ok(None)
}
}
}
} else {
match self {
TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
TreeCacheNode::Branch(size, map) => {
if let Some(node) = map.remove(current_key.borrow()) {
*size -= node.len();
Ok(Some(node))
} else {
Ok(None)
}
}
}
}
}
pub fn items(&'a self) -> impl Iterator<Item = (Vec<&K>, &V)> {
// To avoid a lot of mallocs we guess the length of the key. Ideally
// we'd know this.
let capacity_guesstimate = 10;
let mut stack = vec![(Vec::with_capacity(capacity_guesstimate), self)];
std::iter::from_fn(move || {
while let Some((prefix, node)) = stack.pop() {
match node {
TreeCacheNode::Leaf(value) => return Some((prefix, value)),
TreeCacheNode::Branch(_, map) => {
stack.extend(map.iter().map(|(k, v)| {
let mut new_prefix = Vec::with_capacity(capacity_guesstimate);
new_prefix.extend_from_slice(&prefix);
new_prefix.push(k);
(new_prefix, v)
}));
}
}
}
None
})
}
pub fn values(&'a self) -> impl Iterator<Item = &V> {
let mut stack = vec![self];
std::iter::from_fn(move || {
while let Some(node) = stack.pop() {
match node {
TreeCacheNode::Leaf(value) => return Some(value),
TreeCacheNode::Branch(_, map) => {
stack.extend(map.iter().map(|(_k, v)| v));
}
}
}
None
})
}
}
impl<'a, K: Clone + Eq + Hash + 'a, V> TreeCacheNode<K, V> {
pub fn into_items(self) -> impl Iterator<Item = (Vec<K>, V)> {
let mut stack = vec![(Vec::new(), self)];
std::iter::from_fn(move || {
while let Some((prefix, node)) = stack.pop() {
match node {
TreeCacheNode::Leaf(value) => return Some((prefix, value)),
TreeCacheNode::Branch(_, map) => {
stack.extend(map.into_iter().map(|(k, v)| {
let mut prefix = prefix.clone();
prefix.push(k);
(prefix, v)
}));
}
}
}
None
})
}
}
impl<K, V> Default for TreeCacheNode<K, V> {
fn default() -> Self {
TreeCacheNode::new_branch()
}
}
pub struct TreeCache<K, V> {
root: TreeCacheNode<K, V>,
}
impl<K, V> TreeCache<K, V> {
pub fn new() -> Self {
TreeCache {
root: TreeCacheNode::new_branch(),
}
}
}
impl<'a, K: Eq + Hash + 'a, V> TreeCache<K, V> {
pub fn set(&mut self, key: impl IntoIterator<Item = K>, value: V) -> Result<(), Error> {
self.root.set(key.into_iter(), value)?;
Ok(())
}
pub fn get_node<Q>(
&self,
key: impl IntoIterator<Item = Q>,
) -> Result<Option<&TreeCacheNode<K, V>>, Error>
where
Q: Borrow<K>,
Q: Hash + Eq + 'a,
{
let mut node = &self.root;
for k in key {
match node {
TreeCacheNode::Leaf(_) => bail!("Given key is too long"),
TreeCacheNode::Branch(_, map) => {
node = if let Some(node) = map.get(k.borrow()) {
node
} else {
return Ok(None);
};
}
}
}
Ok(Some(node))
}
pub fn get<Q>(&self, key: impl IntoIterator<Item = Q>) -> Result<Option<&V>, Error>
where
Q: Borrow<K>,
Q: Hash + Eq + 'a,
{
if let Some(node) = self.get_node(key)? {
match node {
TreeCacheNode::Leaf(value) => Ok(Some(value)),
TreeCacheNode::Branch(_, _) => bail!("Given key is too short"),
}
} else {
Ok(None)
}
}
pub fn pop_node<Q>(
&mut self,
key: impl IntoIterator<Item = Q>,
) -> Result<Option<TreeCacheNode<K, V>>, Error>
where
Q: Borrow<K>,
Q: Hash + Eq + 'a,
{
let mut key_iter = key.into_iter();
let k = if let Some(k) = key_iter.next() {
k
} else {
let node = std::mem::replace(&mut self.root, TreeCacheNode::new_branch());
return Ok(Some(node));
};
self.root.pop(k, key_iter)
}
pub fn pop(&mut self, key: &[K]) -> Result<Option<V>, Error> {
if let Some(node) = self.pop_node(key)? {
match node {
TreeCacheNode::Leaf(value) => Ok(Some(value)),
TreeCacheNode::Branch(_, _) => bail!("Given key is too short"),
}
} else {
Ok(None)
}
}
pub fn clear(&mut self) {
self.root = TreeCacheNode::new_branch();
}
pub fn len(&self) -> usize {
match self.root {
TreeCacheNode::Leaf(_) => 1,
TreeCacheNode::Branch(size, _) => size,
}
}
pub fn values(&self) -> impl Iterator<Item = &V> {
let mut stack = vec![&self.root];
std::iter::from_fn(move || {
while let Some(node) = stack.pop() {
match node {
TreeCacheNode::Leaf(value) => return Some(value),
TreeCacheNode::Branch(_, map) => {
stack.extend(map.values());
}
}
}
None
})
}
pub fn items(&self) -> impl Iterator<Item = (Vec<&K>, &V)> {
self.root.items()
}
}
impl<K, V> Default for TreeCache<K, V> {
fn default() -> Self {
TreeCache::new()
}
}
#[cfg(test)]
mod test {
use std::collections::BTreeSet;
use super::*;
#[test]
fn get_set() -> Result<(), Error> {
let mut cache = TreeCache::new();
cache.set(vec!["a", "b"], "c")?;
assert_eq!(cache.get(&["a", "b"])?, Some(&"c"));
let node = cache.get_node(&["a"])?.unwrap();
match node {
TreeCacheNode::Leaf(_) => bail!("expected branch"),
TreeCacheNode::Branch(_, map) => {
assert_eq!(map.len(), 1);
assert!(map.contains_key("b"));
}
}
Ok(())
}
#[test]
fn length() -> Result<(), Error> {
let mut cache = TreeCache::new();
cache.set(vec!["a", "b"], "c")?;
assert_eq!(cache.len(), 1);
cache.set(vec!["a", "b"], "d")?;
assert_eq!(cache.len(), 1);
cache.set(vec!["e", "f"], "g")?;
assert_eq!(cache.len(), 2);
cache.set(vec!["e", "h"], "i")?;
assert_eq!(cache.len(), 3);
cache.set(vec!["e"], "i")?;
assert_eq!(cache.len(), 2);
cache.pop_node(&["a"])?;
assert_eq!(cache.len(), 1);
Ok(())
}
#[test]
fn clear() -> Result<(), Error> {
let mut cache = TreeCache::new();
cache.set(vec!["a", "b"], "c")?;
assert_eq!(cache.len(), 1);
cache.clear();
assert_eq!(cache.len(), 0);
assert_eq!(cache.get(&["a", "b"])?, None);
Ok(())
}
#[test]
fn pop() -> Result<(), Error> {
let mut cache = TreeCache::new();
cache.set(vec!["a", "b"], "c")?;
assert_eq!(cache.pop(&["a", "b"])?, Some("c"));
assert_eq!(cache.pop(&["a", "b"])?, None);
Ok(())
}
#[test]
fn values() -> Result<(), Error> {
let mut cache = TreeCache::new();
cache.set(vec!["a", "b"], "c")?;
let expected = ["c"].iter().collect();
assert_eq!(cache.values().collect::<BTreeSet<_>>(), expected);
cache.set(vec!["d", "e"], "f")?;
let expected = ["c", "f"].iter().collect();
assert_eq!(cache.values().collect::<BTreeSet<_>>(), expected);
Ok(())
}
#[test]
fn items() -> Result<(), Error> {
let mut cache = TreeCache::new();
cache.set(vec!["a", "b"], "c")?;
cache.set(vec!["d", "e"], "f")?;
let expected = [(vec![&"a", &"b"], &"c"), (vec![&"d", &"e"], &"f")]
.into_iter()
.collect();
assert_eq!(cache.items().collect::<BTreeSet<_>>(), expected);
Ok(())
}
}

View File

@@ -0,0 +1,171 @@
import argparse
import datetime
import json
import logging
import subprocess
import sys
from typing import Any, Dict, List, Sequence, Tuple
logger = logging.getLogger(__name__)
def execute_query(query: str) -> Dict[str, Any]:
stdout = subprocess.check_output(
[
"gh",
"api",
"graphql",
"-f",
"query=" + query
]
)
logger.debug("QUERY: %s", query)
result = json.loads(stdout)
logger.debug("RESULT: %s", result)
return result
def execute_query_paginate(query: str) -> List[Dict[str, Any]]:
results = []
logger.debug("PAGINATED QUERY: %s", query)
args = [
"gh",
"api",
"graphql",
"--paginate",
"-f", "query=" + query,
# Use --jq to force each pagination to land on a new line, c.f.
# https://github.com/cli/cli/issues/1268#issuecomment-1261505503
"--jq", ".",
]
stdout = subprocess.check_output(args)
for i, line in enumerate(stdout.splitlines()):
if line:
result = json.loads(line)
logger.debug("RESULT %i: %s", i, result)
results.append(result)
return results
SYNAPSE_PROJECT_ID = "PVT_kwDOAIB0Bs4ABmip"
def determine_iteration_ids() -> Tuple[str, str]:
result = execute_query(
"""
{
node(id: "%s") {
... on ProjectV2 {
field(name:"Week") {
... on ProjectV2IterationField {
configuration {
completedIterations {
id
title
startDate
duration
}
iterations {
id
title
startDate
duration
}
}
}
}
}
}
}
"""
% (SYNAPSE_PROJECT_ID,)
)
config = result["data"]["node"]["field"]["configuration"]
completed = config["completedIterations"]
previous = max(completed, key=lambda d: datetime.date.fromisoformat(d["startDate"]))
outstanding = config["iterations"]
current = min(
outstanding, key=lambda d: datetime.date.fromisoformat(d["startDate"])
)
logger.info(
"Previous iteration: %s (%s) starting, %s ending %s",
previous["id"],
previous["title"],
previous["startDate"],
datetime.date.fromisoformat(previous["startDate"])
+ datetime.timedelta(days=previous["duration"]),
)
logger.info(
"Current iteration: %s (%s) starting, %s ending %s",
current["id"],
current["title"],
current["startDate"],
datetime.date.fromisoformat(current["startDate"])
+ datetime.timedelta(days=current["duration"]),
)
return previous["id"], current["id"]
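A worked example of the selection above, on hypothetical iteration data shaped like the GraphQL response:

completed = [
    {"id": "I1", "title": "W46", "startDate": "2022-11-14", "duration": 7},
    {"id": "I2", "title": "W47", "startDate": "2022-11-21", "duration": 7},
]
outstanding = [
    {"id": "I3", "title": "W48", "startDate": "2022-11-28", "duration": 7},
    {"id": "I4", "title": "W49", "startDate": "2022-12-05", "duration": 7},
]
# max() over completed start dates -> previous == "I2" (most recently finished week)
# min() over outstanding start dates -> current == "I3" (the week now in progress)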
def fetch_outstanding_items(previous_iteration: str) -> List[str]:
results = execute_query_paginate(
"""
query($endCursor: String) {
node(id: "%s") {
... on ProjectV2 {
items (first: 50, after: $endCursor) {
nodes {
id
fieldValueByName(name: "Week") {
... on ProjectV2ItemFieldIterationValue {
iterationId
}
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
}
"""
% (SYNAPSE_PROJECT_ID,)
)
outstanding = []
for result in results:
for node in result["data"]["node"]["items"]["nodes"]:
if (node["fieldValueByName"] or {}).get("iterationId") == previous_iteration:
outstanding.append(node["id"])
return outstanding
def main(argv: Sequence[str]) -> int:
args = parser.parse_args(argv)
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
previous_iter, current_iter = determine_iteration_ids()
outstanding = fetch_outstanding_items(previous_iter)
for item_id in outstanding:
print(item_id)
# TODO: filter out the items which are archived or status: done
# TODO: print out the remaining items' titles, assignee, repo, issue/PR number, status column
# TODO: prompt user to confirm moving those from week A to week B
# TODO: do the moves
return 0
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true")
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))

View File

@@ -162,9 +162,9 @@ else
# We only test faster room joins on monoliths, because they are purposefully
# being developed without worker support to start with.
#
# The tests for importing historical messages (MSC2716) also only pass with monoliths,
# currently.
test_tags="$test_tags,faster_joins,msc2716"
# The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
# also only pass with monoliths, currently.
test_tags="$test_tags,faster_joins,msc2716,msc3030"
fi

View File

@@ -26,11 +26,7 @@ class PushRules:
class FilteredPushRules:
def __init__(
self,
push_rules: PushRules,
enabled_map: Dict[str, bool],
msc3664_enabled: bool,
msc1767_enabled: bool,
self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@@ -45,8 +41,6 @@ class PushRuleEvaluator:
notification_power_levels: Mapping[str, int],
related_events_flattened: Mapping[str, Mapping[str, str]],
related_event_match_enabled: bool,
room_version_feature_flags: list[str],
msc3931_enabled: bool,
): ...
def run(
self,

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, List, Optional
from typing import Callable, Dict, Optional
import attr
@@ -51,13 +51,6 @@ class RoomDisposition:
UNSTABLE = "unstable"
class PushRuleRoomFlag:
"""Enum for listing possible MSC3931 room version feature flags, for push rules"""
# MSC3932: Room version supports MSC1767 Extensible Events.
EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomVersion:
"""An object which describes the unique attributes of a room version."""
@@ -98,12 +91,6 @@ class RoomVersion:
msc3787_knock_restricted_join_rule: bool
# MSC3667: Enforce integer power levels
msc3667_int_only_power_levels: bool
# MSC3931: Adds a push rule condition for "room version feature flags", making
# some push rules room version dependent. Note that adding a flag to this list
# is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, so conditions
# that use them will fail to match.
msc3931_push_features: List[str] # values from PushRuleRoomFlag
class RoomVersions:
@@ -124,7 +111,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V2 = RoomVersion(
"2",
@@ -143,7 +129,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V3 = RoomVersion(
"3",
@@ -162,7 +147,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V4 = RoomVersion(
"4",
@@ -181,7 +165,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V5 = RoomVersion(
"5",
@@ -200,7 +183,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V6 = RoomVersion(
"6",
@@ -219,7 +201,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
@@ -238,7 +219,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V7 = RoomVersion(
"7",
@@ -257,7 +237,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V8 = RoomVersion(
"8",
@@ -276,7 +255,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V9 = RoomVersion(
"9",
@@ -295,7 +273,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
MSC3787 = RoomVersion(
"org.matrix.msc3787",
@@ -314,7 +291,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
V10 = RoomVersion(
"10",
@@ -333,7 +309,6 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3931_push_features=[],
)
MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4",
@@ -352,27 +327,6 @@ class RoomVersions:
msc2716_redactions=True,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3931_push_features=[],
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
"org.matrix.msc1767.10",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc2716_historical=False,
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS],
)

View File

@@ -266,18 +266,26 @@ def register_start(
reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
def listen_metrics(
bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
) -> None:
"""
Start Prometheus metrics server.
"""
from prometheus_client import start_http_server as start_http_server_prometheus
from synapse.metrics import RegistryProxy
from synapse.metrics import (
RegistryProxy,
start_http_server as start_http_server_legacy,
)
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
_set_prometheus_client_use_created_metrics(False)
start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
if enable_legacy_metric_names:
start_http_server_legacy(port, addr=host, registry=RegistryProxy)
else:
_set_prometheus_client_use_created_metrics(False)
start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:

View File

@@ -44,8 +44,40 @@ from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.rest import ClientRestResource
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
events,
initial_sync,
login,
presence,
profile,
push_rule,
read_marker,
receipts,
relations,
room,
room_batch,
room_keys,
sendtodevice,
sync,
tags,
user_directory,
versions,
voip,
)
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
KeyUploadServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.register import (
RegisterRestServlet,
RegistrationTokenValidityRestServlet,
)
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -168,7 +200,45 @@ class GenericWorkerServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource: Resource = ClientRestResource(self)
resource = JsonResource(self, canonical_json=False)
RegisterRestServlet(self).register(resource)
RegistrationTokenValidityRestServlet(self).register(resource)
login.register_servlets(self, resource)
ThreepidRestServlet(self).register(resource)
WhoamiRestServlet(self).register(resource)
DevicesRestServlet(self).register(resource)
# Read-only
KeyUploadServlet(self).register(resource)
KeyQueryServlet(self).register(resource)
KeyChangesServlet(self).register(resource)
OneTimeKeyServlet(self).register(resource)
voip.register_servlets(self, resource)
push_rule.register_servlets(self, resource)
versions.register_servlets(self, resource)
profile.register_servlets(self, resource)
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
room.register_servlets(self, resource, is_worker=True)
relations.register_servlets(self, resource)
room.register_deprecated_servlets(self, resource)
initial_sync.register_servlets(self, resource)
room_batch.register_servlets(self, resource)
room_keys.register_servlets(self, resource)
tags.register_servlets(self, resource)
account_data.register_servlets(self, resource)
receipts.register_servlets(self, resource)
read_marker.register_servlets(self, resource)
sendtodevice.register_servlets(self, resource)
user_directory.register_servlets(self, resource)
presence.register_servlets(self, resource)
resources[CLIENT_API_PREFIX] = resource
@@ -250,6 +320,7 @@ class GenericWorkerServer(HomeServer):
_base.listen_metrics(
listener.bind_addresses,
listener.port,
enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
)
else:
logger.warning("Unsupported listener type: %s", listener.type)

View File

@@ -265,6 +265,7 @@ class SynapseHomeServer(HomeServer):
_base.listen_metrics(
listener.bind_addresses,
listener.port,
enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
)
else:
# this shouldn't happen, as the listener type should have been checked

View File

@@ -32,9 +32,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
# Type for the `device_one_time_keys_count` field in an appservice transaction
# Type for the `device_one_time_key_counts` field in an appservice transaction
# user ID -> {device ID -> {algorithm -> count}}
TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]
TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
# Type for the `device_unused_fallback_key_types` field in an appservice transaction
# user ID -> {device ID -> [algorithm]}
@@ -376,7 +376,7 @@ class AppServiceTransaction:
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
one_time_keys_count: TransactionOneTimeKeysCount,
one_time_key_counts: TransactionOneTimeKeyCounts,
unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates,
):
@@ -385,7 +385,7 @@ class AppServiceTransaction:
self.events = events
self.ephemeral = ephemeral
self.to_device_messages = to_device_messages
self.one_time_keys_count = one_time_keys_count
self.one_time_key_counts = one_time_key_counts
self.unused_fallback_keys = unused_fallback_keys
self.device_list_summary = device_list_summary
@@ -402,7 +402,7 @@ class AppServiceTransaction:
events=self.events,
ephemeral=self.ephemeral,
to_device_messages=self.to_device_messages,
one_time_keys_count=self.one_time_keys_count,
one_time_key_counts=self.one_time_key_counts,
unused_fallback_keys=self.unused_fallback_keys,
device_list_summary=self.device_list_summary,
txn_id=self.id,

View File

@@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.appservice import (
ApplicationService,
TransactionOneTimeKeysCount,
TransactionOneTimeKeyCounts,
TransactionUnusedFallbackKeys,
)
from synapse.events import EventBase
@@ -262,7 +262,7 @@ class ApplicationServiceApi(SimpleHttpClient):
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
one_time_keys_count: TransactionOneTimeKeysCount,
one_time_key_counts: TransactionOneTimeKeyCounts,
unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates,
txn_id: Optional[int] = None,
@@ -310,13 +310,10 @@ class ApplicationServiceApi(SimpleHttpClient):
# TODO: Update to stable prefixes once MSC3202 completes FCP merge
if service.msc3202_transaction_extensions:
if one_time_keys_count:
if one_time_key_counts:
body[
"org.matrix.msc3202.device_one_time_key_counts"
] = one_time_keys_count
body[
"org.matrix.msc3202.device_one_time_keys_count"
] = one_time_keys_count
] = one_time_key_counts
if unused_fallback_keys:
body[
"org.matrix.msc3202.device_unused_fallback_key_types"

View File

@@ -64,7 +64,7 @@ from typing import (
from synapse.appservice import (
ApplicationService,
ApplicationServiceState,
TransactionOneTimeKeysCount,
TransactionOneTimeKeyCounts,
TransactionUnusedFallbackKeys,
)
from synapse.appservice.api import ApplicationServiceApi
@@ -258,7 +258,7 @@ class _ServiceQueuer:
):
return
one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None
if (
@@ -269,7 +269,7 @@ class _ServiceQueuer:
# for the users which are mentioned in this transaction,
# as well as the appservice's sender.
(
one_time_keys_count,
one_time_key_counts,
unused_fallback_keys,
) = await self._compute_msc3202_otk_counts_and_fallback_keys(
service, events, ephemeral, to_device_messages_to_send
@@ -281,7 +281,7 @@ class _ServiceQueuer:
events,
ephemeral,
to_device_messages_to_send,
one_time_keys_count,
one_time_key_counts,
unused_fallback_keys,
device_list_summary,
)
@@ -296,7 +296,7 @@ class _ServiceQueuer:
events: Iterable[EventBase],
ephemerals: Iterable[JsonDict],
to_device_messages: Iterable[JsonDict],
) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]:
"""
Given a list of the events, ephemeral messages and to-device messages,
- first computes a list of application services users that may have
@@ -367,7 +367,7 @@ class _TransactionController:
events: List[EventBase],
ephemeral: Optional[List[JsonDict]] = None,
to_device_messages: Optional[List[JsonDict]] = None,
one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
device_list_summary: Optional[DeviceListUpdates] = None,
) -> None:
@@ -380,7 +380,7 @@ class _TransactionController:
events: The persistent events to include in the transaction.
ephemeral: The ephemeral events to include in the transaction.
to_device_messages: The to-device messages to include in the transaction.
one_time_keys_count: Counts of remaining one-time keys for relevant
one_time_key_counts: Counts of remaining one-time keys for relevant
appservice devices in the transaction.
unused_fallback_keys: Lists of unused fallback keys for relevant
appservice devices in the transaction.
@@ -397,7 +397,7 @@ class _TransactionController:
events=events,
ephemeral=ephemeral or [],
to_device_messages=to_device_messages or [],
one_time_keys_count=one_time_keys_count or {},
one_time_key_counts=one_time_key_counts or {},
unused_fallback_keys=unused_fallback_keys or {},
device_list_summary=device_list_summary or DeviceListUpdates(),
)

View File

@@ -16,7 +16,6 @@ from typing import Any, Optional
import attr
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict
@@ -54,6 +53,9 @@ class ExperimentalConfig(Config):
# MSC3266 (room summary api)
self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
# MSC3030 (Jump to date API endpoint)
self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)
# MSC2409 (this setting only relates to optionally sending to-device messages).
# Presence, typing and read receipt EDUs are already sent to application services that
# have opted in to receive them. If enabled, this adds to-device messages to that list.
@@ -129,10 +131,3 @@ class ExperimentalConfig(Config):
# MSC3912: Relation-based redactions.
self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
# MSC1767 and friends: Extensible Events
self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
if self.msc1767_enabled:
# Enable room version (and thus applicable push rules from MSC3931/3932)
version_id = RoomVersions.MSC1767v10.identifier
KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10

View File

@@ -43,6 +43,8 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push"

View File

@@ -26,7 +26,6 @@ class PushConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
push_config = config.get("push") or {}
self.push_include_content = push_config.get("include_content", True)
self.enable_push = push_config.get("enabled", True)
self.push_group_unread_count_by_room = push_config.get(
"group_unread_count_by_room", True
)

View File

@@ -14,6 +14,7 @@
import abc
import logging
import urllib
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
import attr
@@ -812,27 +813,31 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
results = {}
async def get_keys(key_to_fetch_item: _FetchKeyRequest) -> None:
async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None:
server_name = key_to_fetch_item.server_name
key_ids = key_to_fetch_item.key_ids
try:
keys = await self.get_server_verify_keys_v2_direct(server_name)
keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
results[server_name] = keys
except KeyLookupError as e:
logger.warning("Error looking up keys from %s: %s", server_name, e)
logger.warning(
"Error looking up keys %s from %s: %s", key_ids, server_name, e
)
except Exception:
logger.exception("Error getting keys from %s", server_name)
logger.exception("Error getting keys %s from %s", key_ids, server_name)
await yieldable_gather_results(get_keys, keys_to_fetch)
await yieldable_gather_results(get_key, keys_to_fetch)
return results
async def get_server_verify_keys_v2_direct(
self, server_name: str
async def get_server_verify_key_v2_direct(
self, server_name: str, key_ids: Iterable[str]
) -> Dict[str, FetchKeyResult]:
"""
Args:
server_name: Server to request keys from
server_name: The server to fetch keys from
key_ids: The IDs of the keys to request
Returns:
Map from key ID to lookup result
@@ -840,41 +845,57 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
Raises:
KeyLookupError if there was a problem making the lookup
"""
time_now_ms = self.clock.time_msec()
try:
response = await self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server",
ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an
# easy request to handle, so if it doesn't reply within 10s, it's
# probably not going to.
#
# Furthermore, when we are acting as a notary server, we cannot
# wait all day for all of the origin servers, as the requesting
# server will otherwise time out before we can respond.
#
# (Note that get_json may make 4 attempts, so this can still take
# almost 45 seconds to fetch the headers, plus up to another 60s to
# read the response).
timeout=10000,
)
except (NotRetryingDestination, RequestSendFailed) as e:
# these both have str() representations which we can't really improve
# upon
raise KeyLookupError(str(e))
except HttpResponseException as e:
raise KeyLookupError("Remote server returned an error: %s" % (e,))
keys: Dict[str, FetchKeyResult] = {}
assert isinstance(response, dict)
if response["server_name"] != server_name:
raise KeyLookupError(
"Expected a response for server %r not %r"
% (server_name, response["server_name"])
)
for requested_key_id in key_ids:
# we may have found this key as a side-effect of asking for another.
if requested_key_id in keys:
continue
return await self.process_v2_response(
from_server=server_name,
response_json=response,
time_added_ms=time_now_ms,
)
time_now_ms = self.clock.time_msec()
try:
response = await self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server/"
+ urllib.parse.quote(requested_key_id, safe=""),
ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an
# easy request to handle, so if it doesn't reply within 10s, it's
# probably not going to.
#
# Furthermore, when we are acting as a notary server, we cannot
# wait all day for all of the origin servers, as the requesting
# server will otherwise time out before we can respond.
#
# (Note that get_json may make 4 attempts, so this can still take
# almost 45 seconds to fetch the headers, plus up to another 60s to
# read the response).
timeout=10000,
)
except (NotRetryingDestination, RequestSendFailed) as e:
# these both have str() representations which we can't really improve
# upon
raise KeyLookupError(str(e))
except HttpResponseException as e:
raise KeyLookupError("Remote server returned an error: %s" % (e,))
assert isinstance(response, dict)
if response["server_name"] != server_name:
raise KeyLookupError(
"Expected a response for server %r not %r"
% (server_name, response["server_name"])
)
response_keys = await self.process_v2_response(
from_server=server_name,
response_json=response,
time_added_ms=time_now_ms,
)
await self.store.store_server_verify_keys(
server_name,
time_now_ms,
((server_name, key_id, key) for key_id, key in response_keys.items()),
)
keys.update(response_keys)
return keys
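For reference, quote(..., safe="") also escapes the colon in a key ID, so the per-key path built above comes out like this (illustrative):

import urllib.parse

key_id = "ed25519:a_fetched_key"
path = "/_matrix/key/v2/server/" + urllib.parse.quote(key_id, safe="")
assert path == "/_matrix/key/v2/server/ed25519%3Aa_fetched_key"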

View File

@@ -1691,19 +1691,9 @@ class FederationClient(FederationBase):
# to return events on *both* sides of the timestamp to
# help reconcile the gap faster.
_timestamp_to_event_from_destination,
# Since this endpoint is new, we should try other servers before giving up.
# We can safely remove this in a year (remove after 2023-11-16).
failover_on_unknown_endpoint=True,
)
return timestamp_to_event_response
except SynapseError as e:
logger.warning(
"timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
room_id,
timestamp,
direction,
e,
)
except SynapseError:
return None
async def _timestamp_to_event_from_destination(

View File

@@ -434,23 +434,7 @@ class FederationSender(AbstractFederationSender):
# If there are no prev event IDs then the state is empty
# and so no remote servers in the room
destinations = set()
if destinations is None:
# During partial join we use the set of servers that we got
# when beginning the join. It's still possible that we send
# events to servers that left the room in the meantime, but
# we consider that an acceptable risk since it is only our own
# events that we leak and not other server's ones.
partial_state_destinations = (
await self.store.get_partial_state_servers_at_join(
event.room_id
)
)
if len(partial_state_destinations) > 0:
destinations = partial_state_destinations
if destinations is None:
else:
# We check the external cache for the destinations, which is
# stored per state group.
@@ -647,7 +631,7 @@ class FederationSender(AbstractFederationSender):
room_id = receipt.room_id
# Work out which remote servers should be poked and poke them.
domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
domains_set = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
domains = [

View File

@@ -35,7 +35,7 @@ from synapse.logging import issue9533_logger
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict, ReadReceipt
from synapse.types import ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.visibility import filter_events_for_server
@@ -136,11 +136,8 @@ class PerDestinationQueue:
# destination
self._pending_presence: Dict[str, UserPresenceState] = {}
# List of room_id -> receipt_type -> user_id -> receipt_dict,
#
# Each receipt can only have a single receipt per
# (room ID, receipt type, user ID, thread ID) tuple.
self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
# room_id -> receipt_type -> user_id -> receipt_dict
self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {}
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
@@ -205,53 +202,17 @@ class PerDestinationQueue:
Args:
receipt: receipt to be queued
"""
serialized_receipt: JsonDict = {
"event_ids": receipt.event_ids,
"data": receipt.data,
}
if receipt.thread_id is not None:
serialized_receipt["data"]["thread_id"] = receipt.thread_id
# Find which EDU to add this receipt to. There are three situations
# depending on the (room ID, receipt type, user ID, thread ID) tuple:
#
# 1. If it fully matches, clobber the information.
# 2. If it is missing, add the information.
# 3. If the (room ID, receipt type, user ID) subset matches but the
#    thread ID differs, check the next EDU (or add a new EDU).
for edu in self._pending_receipt_edus:
receipt_content = edu.setdefault(receipt.room_id, {}).setdefault(
receipt.receipt_type, {}
)
# If this room ID, receipt type, user ID is not in this EDU, OR if
# the full tuple matches, use the current EDU.
if (
receipt.user_id not in receipt_content
or receipt_content[receipt.user_id].get("thread_id")
== receipt.thread_id
):
receipt_content[receipt.user_id] = serialized_receipt
break
# If no matching EDU was found, create a new one.
else:
self._pending_receipt_edus.append(
{
receipt.room_id: {
receipt.receipt_type: {receipt.user_id: serialized_receipt}
}
}
)
self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
receipt.receipt_type, {}
)[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
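The three cases described above reduce to a small merge routine; a standalone Python sketch (hypothetical helper, not part of this class):

def queue_receipt(pending_edus, room_id, receipt_type, user_id, thread_id, receipt):
    for edu in pending_edus:
        bucket = edu.setdefault(room_id, {}).setdefault(receipt_type, {})
        if user_id not in bucket or bucket[user_id]["thread_id"] == thread_id:
            # Case 1 (full match): clobber. Case 2 (missing): add.
            bucket[user_id] = {"thread_id": thread_id, **receipt}
            return
        # Case 3: same (room, type, user) but another thread -> try the next EDU.
    pending_edus.append(
        {room_id: {receipt_type: {user_id: {"thread_id": thread_id, **receipt}}}}
    )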
def flush_read_receipts_for_room(self, room_id: str) -> None:
# If there are any pending receipts for this room then force-flush them
# in a new transaction.
for edu in self._pending_receipt_edus:
if room_id in edu:
self._rrs_pending_flush = True
self.attempt_new_transaction()
# No use in checking remaining EDUs if the room was found.
break
# if we don't have any read-receipts for this room, it may be that we've already
# sent them out, so we don't need to flush.
if room_id not in self._pending_rrs:
return
self._rrs_pending_flush = True
self.attempt_new_transaction()
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
self._pending_edus_keyed[(edu.edu_type, key)] = edu
@@ -390,7 +351,7 @@ class PerDestinationQueue:
self._pending_edus = []
self._pending_edus_keyed = {}
self._pending_presence = {}
self._pending_receipt_edus = []
self._pending_rrs = {}
self._start_catching_up()
except FederationDeniedError as e:
@@ -582,27 +543,22 @@ class PerDestinationQueue:
self._destination, last_successful_stream_ordering
)
def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
if not self._pending_receipt_edus:
def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
if not self._pending_rrs:
return
if not force_flush and not self._rrs_pending_flush:
# not yet time for this lot
return
# Send at most limit EDUs for receipts.
for content in self._pending_receipt_edus[:limit]:
yield Edu(
origin=self._server_name,
destination=self._destination,
edu_type=EduTypes.RECEIPT,
content=content,
)
self._pending_receipt_edus = self._pending_receipt_edus[limit:]
# If there are still pending read-receipts, don't reset the pending flush
# flag.
if not self._pending_receipt_edus:
self._rrs_pending_flush = False
edu = Edu(
origin=self._server_name,
destination=self._destination,
edu_type=EduTypes.RECEIPT,
content=self._pending_rrs,
)
self._pending_rrs = {}
self._rrs_pending_flush = False
yield edu
def _pop_pending_edus(self, limit: int) -> List[Edu]:
pending_edus = self._pending_edus
@@ -689,20 +645,40 @@ class _TransactionQueueManager:
async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
# First we calculate the EDUs we want to send, if any.
# There's a maximum number of EDUs that can be sent with a transaction,
# generally device updates and to-device messages get priority, but we
# want to ensure that there's room for some other EDUs as well.
#
# This is done by:
#
# * Add a presence EDU, if one exists.
# * Add up to a small limit of read receipt EDUs.
# * Add to-device EDUs, but leave some space for device list updates.
# * Add device list updates EDUs.
# * If there's any remaining room, add other EDUs.
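# (Concretely, assuming MAX_EDUS_PER_TRANSACTION is 100: one presence EDU and
# up to five receipt EDUs leave roughly 94 slots, and to-device messages may
# take all but ten of those, keeping headroom for device list updates.)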
pending_edus = []
# We start by fetching device related EDUs, i.e device updates and to
# device messages. We have to keep 2 free slots for presence and rr_edus.
device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
# Add presence EDU.
# We prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
device_edu_limit -= len(to_device_edus)
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
device_edu_limit
)
if device_update_edus:
self._device_list_id = dev_list_id
else:
self.queue._last_device_list_stream_id = dev_list_id
pending_edus = device_update_edus + to_device_edus
# Now add the read receipt EDU.
pending_edus.extend(self.queue._get_rr_edus(force_flush=False))
# And presence EDU.
if self.queue._pending_presence:
pending_edus.append(
Edu(
@@ -721,47 +697,16 @@ class _TransactionQueueManager:
)
self.queue._pending_presence = {}
# Add read receipt EDUs.
pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
# Next, prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
pending_edus.extend(to_device_edus)
edu_limit -= len(to_device_edus)
# Add device list update EDUs.
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
edu_limit
)
if device_update_edus:
self._device_list_id = dev_list_id
else:
self.queue._last_device_list_stream_id = dev_list_id
pending_edus.extend(device_update_edus)
edu_limit -= len(device_update_edus)
# Finally add any other types of EDUs if there is room.
other_edus = self.queue._pop_pending_edus(edu_limit)
pending_edus.extend(other_edus)
edu_limit -= len(other_edus)
while edu_limit > 0 and self.queue._pending_edus_keyed:
pending_edus.extend(
self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
)
while (
len(pending_edus) < MAX_EDUS_PER_TRANSACTION
and self.queue._pending_edus_keyed
):
_, val = self.queue._pending_edus_keyed.popitem()
pending_edus.append(val)
edu_limit -= 1
# Now we look for any PDUs to send, by getting up to 50 PDUs from the
# queue
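Condensing the bulleted plan above into one place: the newer ordering packs a transaction against a fixed EDU budget (MAX_EDUS_PER_TRANSACTION is 100 in Synapse), spending it as presence → read receipts → to-device → device lists → everything else. The `fetch_*` helpers below are hypothetical stand-ins for the queue methods:

MAX_EDUS_PER_TRANSACTION = 100  # Synapse's per-transaction cap

def pack_edus(queue) -> list:
    pending: list = []

    pending.extend(queue.fetch_presence())         # at most one presence EDU
    pending.extend(queue.fetch_receipts(limit=5))  # a small batch of receipts

    budget = MAX_EDUS_PER_TRANSACTION - len(pending)

    # To-device messages next, keeping 10 slots spare so device list
    # updates can still trickle out.
    to_device = queue.fetch_to_device(budget - 10)
    pending.extend(to_device)
    budget -= len(to_device)

    device_lists = queue.fetch_device_lists(budget)
    pending.extend(device_lists)
    budget -= len(device_lists)

    # Whatever room is left goes to other (keyed) EDUs.
    pending.extend(queue.fetch_other(budget))
    return pending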
@@ -772,10 +717,8 @@ class _TransactionQueueManager:
# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
if edu_limit:
pending_edus.extend(
self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
)
if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
pending_edus.extend(self.queue._get_rr_edus(force_flush=True))
if self._pdus:
self._last_stream_ordering = self._pdus[


@@ -185,8 +185,9 @@ class TransportLayerClient:
Raises:
Various exceptions when the request fails
"""
path = _create_v1_path(
"/timestamp_to_event/%s",
path = _create_path(
FEDERATION_UNSTABLE_PREFIX,
"/org.matrix.msc3030/timestamp_to_event/%s",
room_id,
)
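For reference, the two helpers build the same shape of URL under different prefixes, percent-encoding the room ID into the path. A sketch of what each call produces; `make_path` is a simplified stand-in for `_create_v1_path`/`_create_path`:

from urllib.parse import quote

FEDERATION_V1_PREFIX = "/_matrix/federation/v1"
FEDERATION_UNSTABLE_PREFIX = "/_matrix/federation/unstable"

def make_path(prefix: str, template: str, *args: str) -> str:
    # Interpolate percent-encoded args into the template.
    return prefix + template % tuple(quote(a, safe="") for a in args)

# Stable endpoint (the newer code):
#   /_matrix/federation/v1/timestamp_to_event/%21room%3Aexample.org
# Unstable MSC3030 endpoint (the reverted code):
#   /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/%21room%3Aexample.org
stable = make_path(FEDERATION_V1_PREFIX, "/timestamp_to_event/%s", "!room:example.org")
unstable = make_path(
    FEDERATION_UNSTABLE_PREFIX,
    "/org.matrix.msc3030/timestamp_to_event/%s",
    "!room:example.org",
)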


@@ -25,6 +25,7 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
FederationTimestampLookupServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -290,6 +291,13 @@ def register_servlets(
)
for servletclass in SERVLET_GROUPS[servlet_group]:
# Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
if (
servletclass == FederationTimestampLookupServlet
and not hs.config.experimental.msc3030_enabled
):
continue
# Only allow the `/account_status` servlet if msc3720 is enabled
if (
servletclass == FederationAccountStatusServlet


@@ -218,13 +218,14 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.
GET /_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
{
"event_id": ...
}
"""
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"
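Tying the last two hunks together: a federation servlet's full route is its class-level `PREFIX` plus its `PATH` regex, and experimental servlets are skipped at registration time unless their config flag is set. A rough sketch of that pattern; the names are illustrative, not Synapse's exact plumbing:

import re

FEDERATION_UNSTABLE_PREFIX = "/_matrix/federation/unstable"

class TimestampLookupServlet:
    PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"

def register(servlet_classes, msc3030_enabled: bool) -> dict:
    routes = {}
    for klass in servlet_classes:
        # Mirror the gating above: skip the servlet when the experiment is off.
        if klass is TimestampLookupServlet and not msc3030_enabled:
            continue
        routes[re.compile("^" + klass.PREFIX + klass.PATH + "$")] = klass
    return routes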
async def on_GET(
self,


@@ -45,7 +45,6 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._store = hs.get_datastores().main
self._state_storage_controller = hs.get_storage_controllers().state
self._server_name = hs.hostname
async def check_auth_rules_from_context(
@@ -180,22 +179,17 @@ class EventAuthHandler:
this function may return an incorrect result as we are not able to fully
track server membership in a room without full state.
"""
if await self._store.is_partial_state_room(room_id):
if allow_partial_state_rooms:
current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
if host not in current_hosts:
raise AuthError(403, "Host not in room (partial-state approx).")
else:
raise AuthError(
403,
"Unable to authorise you right now; room is partial-stated here.",
errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
)
else:
if not await self.is_host_in_room(room_id, host):
raise AuthError(403, "Host not in room.")
if not allow_partial_state_rooms and await self._store.is_partial_state_room(
room_id
):
raise AuthError(
403,
"Unable to authorise you right now; room is partial-stated here.",
errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
)
if not await self.is_host_in_room(room_id, host):
raise AuthError(403, "Host not in room.")
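The hunk above replaces a blunt check with a more permissive one: with full state, membership is checked directly; with partial state, the newer code either refuses or, when `allow_partial_state_rooms` is set, falls back to an approximation of the hosts in the room. A condensed sketch of the newer logic; `store` and `state` stand in for Synapse's main datastore and state-storage controller, and the exceptions are generic:

async def assert_host_in_room(store, state, room_id: str, host: str,
                              allow_partial_state_rooms: bool) -> None:
    if await store.is_partial_state_room(room_id):
        if not allow_partial_state_rooms:
            raise PermissionError("room is partial-stated here")
        # Best-effort membership from the partial state we do have.
        hosts = await state.get_current_hosts_in_room_or_partial_state_approximation(
            room_id
        )
        if host not in hosts:
            raise PermissionError("host not in room (partial-state approx.)")
    elif host not in await state.get_current_hosts_in_room(room_id):
        raise PermissionError("host not in room")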
async def check_restricted_join_rules(
self,


@@ -1135,13 +1135,11 @@ class EventCreationHandler:
)
state_events = await self.store.get_events_as_list(state_event_ids)
# Create a StateMap[str]
current_state_ids = {
(e.type, e.state_key): e.event_id for e in state_events
}
state_map = {(e.type, e.state_key): e.event_id for e in state_events}
# Actually strip down and only use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event,
current_state_ids=current_state_ids,
current_state_ids=state_map,
for_verification=False,
)


@@ -1435,7 +1435,6 @@ class UserAttributeDict(TypedDict):
localpart: Optional[str]
confirm_localpart: bool
display_name: Optional[str]
picture: Optional[str] # may be omitted by older `OidcMappingProviders`
emails: List[str]
@@ -1521,7 +1520,6 @@ env.filters.update(
@attr.s(slots=True, frozen=True, auto_attribs=True)
class JinjaOidcMappingConfig:
subject_claim: str
picture_claim: str
localpart_template: Optional[Template]
display_name_template: Optional[Template]
email_template: Optional[Template]
@@ -1541,7 +1539,6 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
@staticmethod
def parse_config(config: dict) -> JinjaOidcMappingConfig:
subject_claim = config.get("subject_claim", "sub")
picture_claim = config.get("picture_claim", "picture")
def parse_template_config(option_name: str) -> Optional[Template]:
if option_name not in config:
@@ -1575,7 +1572,6 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
return JinjaOidcMappingConfig(
subject_claim=subject_claim,
picture_claim=picture_claim,
localpart_template=localpart_template,
display_name_template=display_name_template,
email_template=email_template,
@@ -1615,13 +1611,10 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
if email:
emails.append(email)
picture = userinfo.get("picture")
return UserAttributeDict(
localpart=localpart,
display_name=display_name,
emails=emails,
picture=picture,
confirm_localpart=self._config.confirm_localpart,
)
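The `picture_claim` plumbing being stripped above follows the provider's usual pattern: read a configurable claim name, defaulting to the standard OIDC `picture` claim (an HTTPS URL to a profile photo), and copy its value into the mapped user attributes. A trimmed, hypothetical sketch:

from typing import Optional, TypedDict

class MappedAttributes(TypedDict):
    picture: Optional[str]

def map_picture(userinfo: dict, picture_claim: str = "picture") -> MappedAttributes:
    # The claim value, if present, is later downloaded and set as the
    # user's avatar by the SSO handler.
    return MappedAttributes(picture=userinfo.get(picture_claim))

assert map_picture({"picture": "https://example.org/me.png"}) == {
    "picture": "https://example.org/me.png"
}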


@@ -448,12 +448,6 @@ class PaginationHandler:
if pagin_config.from_token:
from_token = pagin_config.from_token
elif pagin_config.direction == "f":
from_token = (
await self.hs.get_event_sources().get_start_token_for_pagination(
room_id
)
)
else:
from_token = (
await self.hs.get_event_sources().get_current_token_for_pagination(


@@ -1764,14 +1764,14 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
Returns:
A list of presence states for the given user to receive.
"""
updated_users = None
if from_key:
# Only return updates since the last sync
updated_users = self.store.presence_stream_cache.get_all_entities_changed(
from_key
)
if not updated_users:
updated_users = []
if updated_users is not None:
# Get the actual presence update for each change
users_to_state = await self.get_presence_handler().current_state_for_users(
updated_users


@@ -92,6 +92,7 @@ class ReceiptsHandler:
continue
# Check if these receipts apply to a thread.
thread_id = None
data = user_values.get("data", {})
thread_id = data.get("thread_id")
# If the thread ID is invalid, consider it missing.
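A receipt can scope itself to a thread via a `thread_id` field in its per-user `data`. A small sketch of one plausible reading of "invalid" (missing or non-string values treated as unthreaded); the helper is hypothetical:

from typing import Optional

def extract_thread_id(user_values: dict) -> Optional[str]:
    # Receipts carry optional per-user `data`; treat a missing or
    # non-string `thread_id` as an unthreaded receipt.
    data = user_values.get("data", {})
    thread_id = data.get("thread_id")
    if not isinstance(thread_id, str) or not thread_id:
        return None
    return thread_id

assert extract_thread_id({"data": {"thread_id": "$root"}}) == "$root"
assert extract_thread_id({"data": {"thread_id": 5}}) is None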


@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import hashlib
import io
import logging
from typing import (
TYPE_CHECKING,
@@ -140,7 +138,6 @@ class UserAttributes:
localpart: Optional[str]
confirm_localpart: bool = False
display_name: Optional[str] = None
picture: Optional[str] = None
emails: Collection[str] = attr.Factory(list)
@@ -199,10 +196,6 @@ class SsoHandler:
self._error_template = hs.config.sso.sso_error_template
self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler()
self._media_repo = (
hs.get_media_repository() if hs.config.media.can_load_media_repo else None
)
self._http_client = hs.get_proxied_blacklisted_http_client()
# The following template is shown after a successful user interactive
# authentication session. It tells the user they can close the window.
@@ -502,8 +495,6 @@ class SsoHandler:
await self._profile_handler.set_displayname(
user_id_obj, requester, attributes.display_name, True
)
if attributes.picture:
await self.set_avatar(user_id, attributes.picture)
await self._auth_handler.complete_sso_login(
user_id,
@@ -712,110 +703,8 @@ class SsoHandler:
await self._store.record_user_external_id(
auth_provider_id, remote_user_id, registered_user_id
)
# Set avatar, if available
if attributes.picture:
await self.set_avatar(registered_user_id, attributes.picture)
return registered_user_id
async def set_avatar(self, user_id: str, picture_https_url: str) -> bool:
"""Set avatar of the user.
This downloads the image file from the URL provided, stores that in
the media repository and then sets the avatar on the user's profile.
It can detect if the same image is being saved again and bails early by storing
the hash of the file in the `upload_name` of the avatar image.
Currently, it only supports server configurations which run the media repository
within the same process.
It fails silently, logging a warning (an exception is raised and caught
internally), if:
* it is unable to fetch the image itself (non 200 status code) or
* the image supplied is bigger than max allowed size or
* the image type is not one of the allowed image types.
Args:
user_id: matrix user ID in the form @localpart:domain as a string.
picture_https_url: HTTPS url for the picture image file.
Returns: `True` if the user's avatar has been successfully set to the image at
`picture_https_url`.
"""
if self._media_repo is None:
logger.info(
"failed to set user avatar because out-of-process media repositories "
"are not supported yet "
)
return False
try:
uid = UserID.from_string(user_id)
def is_allowed_mime_type(content_type: str) -> bool:
if (
self._profile_handler.allowed_avatar_mimetypes
and content_type
not in self._profile_handler.allowed_avatar_mimetypes
):
return False
return True
# download picture, enforcing size limit & mime type check
picture = io.BytesIO()
content_length, headers, uri, code = await self._http_client.get_file(
url=picture_https_url,
output_stream=picture,
max_size=self._profile_handler.max_avatar_size,
is_allowed_content_type=is_allowed_mime_type,
)
if code != 200:
raise Exception(
"GET request to download sso avatar image returned {}".format(code)
)
# upload name includes a hash of the image file's content so that we can
# easily check if it requires an update or not, the next time user logs in.
# Use getvalue() rather than read(): get_file() leaves the stream positioned
# at the end, so read() would hash an empty byte string.
upload_name = "sso_avatar_" + hashlib.sha256(picture.getvalue()).hexdigest()
# bail if user already has the same avatar
profile = await self._profile_handler.get_profile(user_id)
if profile["avatar_url"] is not None:
server_name = profile["avatar_url"].split("/")[-2]
media_id = profile["avatar_url"].split("/")[-1]
if server_name == self._server_name:
media = await self._media_repo.store.get_local_media(media_id)
if media is not None and upload_name == media["upload_name"]:
logger.info("skipping saving the user avatar")
return True
# rewind the buffer so the media repository reads the whole image
picture.seek(0)
# store it in media repository
avatar_mxc_url = await self._media_repo.create_content(
media_type=headers[b"Content-Type"][0].decode("utf-8"),
upload_name=upload_name,
content=picture,
content_length=content_length,
auth_user=uid,
)
# save it as user avatar
await self._profile_handler.set_avatar_url(
uid,
create_requester(uid),
str(avatar_mxc_url),
)
logger.info("successfully saved the user avatar")
return True
except Exception:
logger.warning("failed to save the user avatar")
return False
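The dedupe scheme in the method above is worth isolating: the avatar is stored with an `upload_name` derived from a hash of the image bytes, so the next login can compare names instead of re-storing an identical image. In miniature:

import hashlib

def avatar_upload_name(image_bytes: bytes) -> str:
    # Stable, content-derived name: identical images map to identical names.
    return "sso_avatar_" + hashlib.sha256(image_bytes).hexdigest()

previous = avatar_upload_name(b"\x89PNG...")
assert avatar_upload_name(b"\x89PNG...") == previous  # same bytes -> skip re-upload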
async def complete_sso_ui_auth_request(
self,
auth_provider_id: str,


@@ -1426,14 +1426,14 @@ class SyncHandler:
logger.debug("Fetching OTK data")
device_id = sync_config.device_id
one_time_keys_count: JsonDict = {}
one_time_key_counts: JsonDict = {}
unused_fallback_key_types: List[str] = []
if device_id:
# TODO: We should have a way to let clients differentiate between the states of:
# * no change in OTK count since the provided since token
# * the server has zero OTKs left for this device
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
one_time_keys_count = await self.store.count_e2e_one_time_keys(
one_time_key_counts = await self.store.count_e2e_one_time_keys(
user_id, device_id
)
unused_fallback_key_types = (
@@ -1463,7 +1463,7 @@ class SyncHandler:
archived=sync_result_builder.archived,
to_device=sync_result_builder.to_device,
device_lists=device_lists,
device_one_time_keys_count=one_time_keys_count,
device_one_time_keys_count=one_time_key_counts,
device_unused_fallback_key_types=unused_fallback_key_types,
next_batch=sync_result_builder.now_token,
)


@@ -47,7 +47,11 @@ from twisted.python.threadpool import ThreadPool
# This module is imported for its side effects; flake8 needn't warn that it's unused.
import synapse.metrics._reactor_metrics # noqa: F401
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
from synapse.metrics._legacy_exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
from synapse.metrics._types import Collector
from synapse.util import SYNAPSE_VERSION
@@ -470,6 +474,7 @@ __all__ = [
"Collector",
"MetricsResource",
"generate_latest",
"start_http_server",
"LaterGauge",
"InFlightGauge",
"GaugeBucketCollector",


@@ -0,0 +1,288 @@
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on `prometheus_client/exposition.py` from version 0.7.1.
Because prometheus_client 0.4.0 renamed many metrics, this customised
vendoring emits both the old names that existing Synapse dashboards expect
and the newer "best practice" names of the up-to-date official client.
"""
import logging
import math
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from typing import Any, Dict, List, Type, Union
from urllib.parse import parse_qs, urlparse
from prometheus_client import REGISTRY, CollectorRegistry
from prometheus_client.core import Sample
from twisted.web.resource import Resource
from twisted.web.server import Request
logger = logging.getLogger(__name__)
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
def floatToGoString(d: Union[int, float]) -> str:
d = float(d)
if d == math.inf:
return "+Inf"
elif d == -math.inf:
return "-Inf"
elif math.isnan(d):
return "NaN"
else:
s = repr(d)
dot = s.find(".")
# Go switches to exponents sooner than Python.
# We only need to care about positive values for le/quantile.
if d > 0 and dot > 6:
mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
return f"{mantissa}e+0{dot - 1}"
return s
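A few concrete values (an editorial aside, not part of the vendored file) showing where the Go-style exponent switch kicks in:

assert floatToGoString(math.inf) == "+Inf"
assert floatToGoString(0.5) == "0.5"
assert floatToGoString(1234567.0) == "1.234567e+06"  # Python's repr keeps "1234567.0"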
def sample_line(line: Sample, name: str) -> str:
if line.labels:
labelstr = "{{{0}}}".format(
",".join(
[
'{}="{}"'.format(
k,
v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
)
for k, v in sorted(line.labels.items())
]
)
)
else:
labelstr = ""
timestamp = ""
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = f" {int(float(line.timestamp) * 1000):d}"
return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
# Mapping from new metric names to legacy metric names.
# We translate these back to their old names when exposing them through our
# legacy vendored exporter.
# Only this legacy exposition module applies these name changes.
LEGACY_METRIC_NAMES = {
"synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
"synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
"synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
"synapse_util_caches_cache": "synapse_util_caches_cache:total",
"synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
"synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
"synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
"synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
"synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
"synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
"synapse_admin_mau_current": "synapse_admin_mau:current",
"synapse_admin_mau_max": "synapse_admin_mau:max",
"synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
}
def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
"""
Generate metrics in the legacy naming format, plus the modern names where
they differ, so that both old and new dashboards keep working.
"""
output = []
for metric in registry.collect():
if not metric.samples:
# No samples, don't bother.
continue
# Translate to legacy metric name if it has one.
mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
mnewname = metric.name
mtype = metric.type
# OpenMetrics -> Prometheus
if mtype == "counter":
mnewname = mnewname + "_total"
elif mtype == "info":
mtype = "gauge"
mnewname = mnewname + "_info"
elif mtype == "stateset":
mtype = "gauge"
elif mtype == "gaugehistogram":
mtype = "histogram"
elif mtype == "unknown":
mtype = "untyped"
# Output in the old format for compatibility.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname} {mtype}\n")
om_samples: Dict[str, List[str]] = {}
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == mname + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
# (these come from gaugehistograms which don't get renamed,
# so no need to faff with mnewname)
om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
break
else:
newname = s.name.replace(mnewname, mname)
if ":" in newname and newname.endswith("_total"):
newname = newname[: -len("_total")]
output.append(sample_line(s, newname))
for suffix, lines in sorted(om_samples.items()):
if emit_help:
output.append(
"# HELP {}{} {}\n".format(
mname,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname}{suffix} gauge\n")
output.extend(lines)
# Get rid of the weird colon things while we're at it
if mtype == "counter":
mnewname = mnewname.replace(":total", "")
mnewname = mnewname.replace(":", "_")
if mname == mnewname:
continue
# Also output in the new format, if it's different.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mnewname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mnewname} {mtype}\n")
for s in metric.samples:
# Get rid of the OpenMetrics specific samples (we should already have
# dealt with them above anyway.)
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == mname + suffix:
break
else:
sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
output.append(
sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
)
return "".join(output).encode("utf-8")
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self) -> None:
registry = self.registry
params = parse_qs(urlparse(self.path).query)
if "help" in params:
emit_help = True
else:
emit_help = False
try:
output = generate_latest(registry, emit_help=emit_help)
except Exception:
self.send_error(500, "error generating metric output")
raise
try:
self.send_response(200)
self.send_header("Content-Type", CONTENT_TYPE_LATEST)
self.send_header("Content-Length", str(len(output)))
self.end_headers()
self.wfile.write(output)
except BrokenPipeError as e:
logger.warning(
"BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
)
def log_message(self, format: str, *args: Any) -> None:
"""Log nothing."""
@classmethod
def factory(cls, registry: CollectorRegistry) -> Type:
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
# The str() call is a leftover from the vendored code's Python 2 days
# (unicode_literals); it is a harmless no-op on Python 3.
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
def start_http_server(
port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
) -> None:
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
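Usage is a single call (editorial aside; the port is hypothetical): the handler runs on a daemon thread, so it will not block interpreter shutdown.

# Expose the default REGISTRY on a hypothetical port 9092.
start_http_server(9092)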
class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""
isLeaf = True
def __init__(self, registry: CollectorRegistry = REGISTRY):
self.registry = registry
def render_GET(self, request: Request) -> bytes:
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response
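As a usage sketch (not part of the diff): the resource plugs into an ordinary Twisted site, which is how a dedicated metrics listener can be exposed. The port is hypothetical.

from twisted.internet import reactor
from twisted.web.server import Site

# Serve metrics (legacy + modern names) on a hypothetical port 9092.
reactor.listenTCP(9092, Site(MetricsResource()))
reactor.run()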
