
Merge branch 'develop' into madlittlemods/hs-specific-metrics

Conflicts:
	synapse/app/generic_worker.py
	synapse/app/homeserver.py
Author: Eric Eastwood
Date: 2025-05-20 14:21:19 -05:00
93 changed files with 1083 additions and 313 deletions
+1 -1
View File
@@ -72,7 +72,7 @@ jobs:
- name: Build and push all platforms
id: build-and-push
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
with:
push: true
labels: |
+1 -1
View File
@@ -24,7 +24,7 @@ jobs:
mdbook-version: '0.4.17'
- name: Setup python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
+1 -1
View File
@@ -64,7 +64,7 @@ jobs:
run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
- name: Setup python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
+1 -1
View File
@@ -86,7 +86,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- run: pip install .[all,test]
+1 -1
View File
@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.x'
- run: pip install tomli
+4 -4
View File
@@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.x'
- id: set-distros
@@ -74,7 +74,7 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.x'
@@ -132,7 +132,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
# setup-python@v4 doesn't impose a default python version. Need to use 3.x
# here, because `python` on osx points to Python 2.7.
@@ -177,7 +177,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.10'
+6 -6
View File
@@ -102,7 +102,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
@@ -112,7 +112,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- run: .ci/scripts/check_lockfile.py
@@ -192,7 +192,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
@@ -279,7 +279,7 @@ jobs:
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- run: "pip install rstcheck"
@@ -327,7 +327,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- id: get-matrix
@@ -414,7 +414,7 @@ jobs:
sudo apt-get -qq install build-essential libffi-dev python3-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.9'
+10
View File
@@ -1,3 +1,13 @@
# Synapse 1.130.0 (2025-05-20)
### Bugfixes
- Fix startup being blocked on creating a new index that was introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439))
- Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447))
# Synapse 1.130.0rc1 (2025-05-13)
### Features
+2 -2
View File
@@ -316,9 +316,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.12.3"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
checksum = "45192e5e4a4d2505587e27806c7b710c231c40c56f3bfc19535d0bb25df52264"
dependencies = [
"arc-swap",
"log",
+1
View File
@@ -0,0 +1 @@
Include room ID in room deletion status response.
+1
View File
@@ -0,0 +1 @@
Fix admin redaction endpoint not redacting encrypted messages.
+1
View File
@@ -0,0 +1 @@
Add lint to ensure we don't add a `CREATE/DROP INDEX` in a schema delta.
+1
View File
@@ -0,0 +1 @@
Add advice for upgrading between major PostgreSQL versions to the database documentation.
+1
View File
@@ -0,0 +1 @@
Bump ruff from 0.7.3 to 0.11.10.
+1
View File
@@ -0,0 +1 @@
Allow checking only for the existence of a field in an SSO provider's response, rather than requiring the value(s) to check.
+1
View File
@@ -0,0 +1 @@
Add unit tests for homeserver usage statistics.
+6
View File
@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.130.0) stable; urgency=medium
* New Synapse release 1.130.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 May 2025 08:34:13 -0600
matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium
* New Synapse release 1.130.0rc1.
+6 -1
View File
@@ -794,6 +794,7 @@ A response body like the following is returned:
"results": [
{
"delete_id": "delete_id1",
"room_id": "!roomid:example.com",
"status": "failed",
"error": "error message",
"shutdown_room": {
@@ -804,6 +805,7 @@ A response body like the following is returned:
}
}, {
"delete_id": "delete_id2",
"room_id": "!roomid:example.com",
"status": "purging",
"shutdown_room": {
"kicked_users": [
@@ -842,6 +844,8 @@ A response body like the following is returned:
```json
{
"status": "purging",
"delete_id": "bHkCNQpHqOaFhPtK",
"room_id": "!roomid:example.com",
"shutdown_room": {
"kicked_users": [
"@foobar:example.com"
@@ -869,7 +873,8 @@ The following fields are returned in the JSON response body:
- `results` - An array of objects, each containing information about one task.
This field is omitted from the result when you query by `delete_id`.
Task objects contain the following fields:
- `delete_id` - The ID for this purge if you query by `room_id`.
- `delete_id` - The ID for this purge
- `room_id` - The ID of the room being deleted
- `status` - The status will be one of:
- `shutting_down` - The process is removing users from the room.
- `purging` - The process is purging the room and event data from database.
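For illustration, a minimal Python sketch of polling a deletion task by `delete_id` and reading the new `room_id` field; the homeserver URL, access token and `delete_id` below are placeholders, and the path follows the admin API documented in this file.

```python
# Minimal sketch: poll a room-deletion task by delete_id and read the new room_id field.
# The base URL, token and delete_id are placeholders, not values from this commit.
import requests

BASE_URL = "https://synapse.example.com"
HEADERS = {"Authorization": "Bearer <admin_access_token>"}

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v2/rooms/delete_status/bHkCNQpHqOaFhPtK",
    headers=HEADERS,
    timeout=10,
)
resp.raise_for_status()
task = resp.json()
print(task["delete_id"], task["room_id"], task["status"])
```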
+8
View File
@@ -100,6 +100,14 @@ database:
keepalives_count: 3
```
## Postgresql major version upgrades
Postgres uses separate directories for database locations between major versions (typically `/var/lib/postgresql/<version>/main`).
Therefore, it is recommended to stop Synapse and other services (MAS, etc) before upgrading Postgres major versions.
It is also strongly recommended to [back up](./usage/administration/backups.md#database) your database beforehand to ensure no data loss arising from a failed upgrade.
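As a rough illustration of that ordering (stop services, back up, upgrade, restart), a hedged Python/subprocess sketch; the service name, backup path and `pg_upgradecluster` invocation are Debian-style assumptions, not part of this change.

```python
# Hedged sketch of the recommended order: stop Synapse, back up, upgrade, restart.
# Service names, paths and the pg_upgradecluster call are assumptions (Debian-style),
# not commands taken from this commit.
import subprocess

def run(cmd: list) -> None:
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)

run(["systemctl", "stop", "matrix-synapse"])  # stop Synapse (and MAS, etc.) first
run(["sudo", "-u", "postgres", "pg_dumpall", "-f", "/var/backups/postgres-all.sql"])  # back up
run(["pg_upgradecluster", "15", "main"])      # major-version upgrade helper
run(["systemctl", "start", "matrix-synapse"])
```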
## Backups
Don't forget to [back up](./usage/administration/backups.md#database) your database!
@@ -30,7 +30,7 @@ The following statistics are sent to the configured reporting endpoint:
| `python_version` | string | The Python version number in use (e.g "3.7.1"). Taken from `sys.version_info`. |
| `total_users` | int | The number of registered users on the homeserver. |
| `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. |
| `daily_user_type_native` | int | The number of native users created in the last 24 hours. |
| `daily_user_type_native` | int | The number of native, non-guest users created in the last 24 hours. |
| `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. |
| `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. |
| `total_room_count` | int | The total number of rooms present on the homeserver. |
@@ -50,8 +50,8 @@ The following statistics are sent to the configured reporting endpoint:
| `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. |
| `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. |
| `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. |
| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.0, and "3.38.5" for SQLite 3.38.5 installed on the system. |
| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.0, and "3.38.5" for SQLite 3.38.5 installed on the system. |
| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
[^1]: Native matrix users and guests are always counted. If the
@@ -3780,17 +3780,23 @@ match particular values in the OIDC userinfo. The requirements can be listed und
```yaml
attribute_requirements:
- attribute: family_name
value: "Stephensson"
one_of: ["Stephensson", "Smith"]
- attribute: groups
value: "admin"
# If `value` or `one_of` are not specified, the attribute only needs
# to exist, regardless of value.
- attribute: picture
```
`attribute` is a required field, while `value` and `one_of` are optional.
All of the listed attributes must match for the login to be permitted. Additional attributes can be added to
userinfo by expanding the `scopes` section of the OIDC config to retrieve
additional information from the OIDC provider.
If the OIDC claim is a list, then the attribute must match any value in the list.
Otherwise, it must exactly match the value of the claim. Using the example
above, the `family_name` claim MUST be "Stephensson", but the `groups`
above, the `family_name` claim MUST be either "Stephensson" or "Smith", but the `groups`
claim MUST contain "admin".
Example configuration:
+63 -58
View File
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -34,15 +34,15 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a
[[package]]
name = "authlib"
version = "1.5.1"
version = "1.5.2"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
files = [
{file = "authlib-1.5.1-py2.py3-none-any.whl", hash = "sha256:8408861cbd9b4ea2ff759b00b6f02fd7d81ac5a56d0b2b22c08606c6049aae11"},
{file = "authlib-1.5.1.tar.gz", hash = "sha256:5cbc85ecb0667312c1cdc2f9095680bb735883b123fb509fde1e65b1c5df972e"},
{file = "authlib-1.5.2-py2.py3-none-any.whl", hash = "sha256:8804dd4402ac5e4a0435ac49e0b6e19e395357cfa632a3f624dcb4f6df13b4b1"},
{file = "authlib-1.5.2.tar.gz", hash = "sha256:fe85ec7e50c5f86f1e2603518bb3b4f632985eb4a355e52256530790e326c512"},
]
[package.dependencies]
@@ -451,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -494,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -544,7 +544,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"redis\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},
@@ -890,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -1028,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1044,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"url-preview\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"url-preview\""
files = [
{file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"},
{file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"},
@@ -1330,7 +1330,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1551,7 +1551,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1720,7 +1720,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"postgres\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"postgres\""
files = [
{file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
{file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
@@ -1728,6 +1728,7 @@ files = [
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
{file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
@@ -1740,7 +1741,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1756,7 +1757,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -1979,7 +1980,7 @@ description = "Python extension wrapping the ICU C++ API"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"user-search\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"user-search\""
files = [
{file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
]
@@ -2028,7 +2029,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"cache-memory\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"cache-memory\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2063,18 +2064,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
version = "25.0.0"
version = "25.1.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"},
{file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"},
{file = "pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab"},
{file = "pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b"},
]
[package.dependencies]
cryptography = ">=41.0.5,<45"
cryptography = ">=41.0.5,<46"
typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
[package.extras]
@@ -2088,7 +2089,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2113,7 +2114,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2141,7 +2142,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
@@ -2439,30 +2440,30 @@ files = [
[[package]]
name = "ruff"
version = "0.7.3"
version = "0.11.10"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
{file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
{file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
{file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
{file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
{file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
{file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
{file = "ruff-0.11.10-py3-none-linux_armv6l.whl", hash = "sha256:859a7bfa7bc8888abbea31ef8a2b411714e6a80f0d173c2a82f9041ed6b50f58"},
{file = "ruff-0.11.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:968220a57e09ea5e4fd48ed1c646419961a0570727c7e069842edd018ee8afed"},
{file = "ruff-0.11.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1067245bad978e7aa7b22f67113ecc6eb241dca0d9b696144256c3a879663bca"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4854fd09c7aed5b1590e996a81aeff0c9ff51378b084eb5a0b9cd9518e6cff2"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b4564e9f99168c0f9195a0fd5fa5928004b33b377137f978055e40008a082c5"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b6a9cc5b62c03cc1fea0044ed8576379dbaf751d5503d718c973d5418483641"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:607ecbb6f03e44c9e0a93aedacb17b4eb4f3563d00e8b474298a201622677947"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b3a522fa389402cd2137df9ddefe848f727250535c70dafa840badffb56b7a4"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f071b0deed7e9245d5820dac235cbdd4ef99d7b12ff04c330a241ad3534319f"},
{file = "ruff-0.11.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a60e3a0a617eafba1f2e4186d827759d65348fa53708ca547e384db28406a0b"},
{file = "ruff-0.11.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:da8ec977eaa4b7bf75470fb575bea2cb41a0e07c7ea9d5a0a97d13dbca697bf2"},
{file = "ruff-0.11.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ddf8967e08227d1bd95cc0851ef80d2ad9c7c0c5aab1eba31db49cf0a7b99523"},
{file = "ruff-0.11.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5a94acf798a82db188f6f36575d80609072b032105d114b0f98661e1679c9125"},
{file = "ruff-0.11.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3afead355f1d16d95630df28d4ba17fb2cb9c8dfac8d21ced14984121f639bad"},
{file = "ruff-0.11.10-py3-none-win32.whl", hash = "sha256:dc061a98d32a97211af7e7f3fa1d4ca2fcf919fb96c28f39551f35fc55bdbc19"},
{file = "ruff-0.11.10-py3-none-win_amd64.whl", hash = "sha256:5cc725fbb4d25b0f185cb42df07ab6b76c4489b4bfb740a175f3a59c70e8a224"},
{file = "ruff-0.11.10-py3-none-win_arm64.whl", hash = "sha256:ef69637b35fb8b210743926778d0e45e1bffa850a7c61e428c6b971549b5f5d1"},
{file = "ruff-0.11.10.tar.gz", hash = "sha256:d522fb204b4959909ecac47da02830daec102eeb100fb50ea9554818d47a5fa6"},
]
[[package]]
@@ -2505,7 +2506,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"sentry\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"sentry\""
files = [
{file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
{file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},
@@ -2583,20 +2584,24 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"]
[[package]]
name = "setuptools"
version = "72.1.0"
version = "78.1.1"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main", "dev"]
files = [
{file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"},
{file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"},
{file = "setuptools-78.1.1-py3-none-any.whl", hash = "sha256:c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561"},
{file = "setuptools-78.1.1.tar.gz", hash = "sha256:fcc17fd9cd898242f6b4adfaca46137a9edef687f43e6f78469692a5e70d851d"},
]
[package.extras]
core = ["importlib-metadata (>=6) ; python_version < \"3.10\"", "importlib-resources (>=5.10.2) ; python_version < \"3.9\"", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-ruff (<0.4) ; platform_system == \"Windows\"", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "pytest-ruff (>=0.3.2) ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
[[package]]
name = "setuptools-rust"
@@ -2689,7 +2694,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2705,7 +2710,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2767,7 +2772,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},
{file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"},
@@ -2901,7 +2906,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"redis\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@@ -3244,7 +3249,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
@@ -3389,4 +3394,4 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.1"
python-versions = "^3.9.0"
content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d"
content-hash = "522f5bacf5610646876452e0e397038dd5c959692d2ab76214431bff78562d01"
+2 -2
View File
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.130.0rc1"
version = "1.130.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
ruff = "0.7.3"
ruff = "0.11.10"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
+100 -31
View File
@@ -1,6 +1,8 @@
#!/usr/bin/env python3
# Check that no schema deltas have been added to the wrong version.
#
# Also checks that schema deltas do not try and create or drop indices.
import re
from typing import Any, Dict, List
@@ -9,6 +11,13 @@ import click
import git
SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
INDEX_CREATION_REGEX = re.compile(r"CREATE .*INDEX .*ON ([a-z_]+)", flags=re.IGNORECASE)
INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_]+)", flags=re.IGNORECASE)
TABLE_CREATION_REGEX = re.compile(r"CREATE .*TABLE ([a-z_]+)", flags=re.IGNORECASE)
# The base branch we want to check against. We use the main development branch
# on the assumption that is what we are developing against.
DEVELOP_BRANCH = "develop"
@click.command()
@@ -20,6 +29,9 @@ SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
help="Always output ANSI colours",
)
def main(force_colors: bool) -> None:
# Return code. Set to non-zero when we encounter an error
return_code = 0
click.secho(
"+++ Checking schema deltas are in the right folder",
fg="green",
@@ -30,17 +42,17 @@ def main(force_colors: bool) -> None:
click.secho("Updating repo...")
repo = git.Repo()
repo.remote().fetch()
repo.remote().fetch(refspec=DEVELOP_BRANCH)
click.secho("Getting current schema version...")
r = repo.git.show("origin/develop:synapse/storage/schema/__init__.py")
r = repo.git.show(f"origin/{DEVELOP_BRANCH}:synapse/storage/schema/__init__.py")
locals: Dict[str, Any] = {}
exec(r, locals)
current_schema_version = locals["SCHEMA_VERSION"]
diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
diffs: List[git.Diff] = repo.remote().refs[DEVELOP_BRANCH].commit.diff(None)
# Get the schema version of the local file to check against current schema on develop
with open("synapse/storage/schema/__init__.py") as file:
@@ -53,7 +65,7 @@ def main(force_colors: bool) -> None:
# local schema version must be +/-1 the current schema version on develop
if abs(local_schema_version - current_schema_version) != 1:
click.secho(
"The proposed schema version has diverged more than one version from develop, please fix!",
f"The proposed schema version has diverged more than one version from {DEVELOP_BRANCH}, please fix!",
fg="red",
bold=True,
color=force_colors,
@@ -67,21 +79,28 @@ def main(force_colors: bool) -> None:
click.secho(f"Current schema version: {current_schema_version}")
seen_deltas = False
bad_files = []
bad_delta_files = []
changed_delta_files = []
for diff in diffs:
if not diff.new_file or diff.b_path is None:
if diff.b_path is None:
# We don't lint deleted files.
continue
match = SCHEMA_FILE_REGEX.match(diff.b_path)
if not match:
continue
changed_delta_files.append(diff.b_path)
if not diff.new_file:
continue
seen_deltas = True
_, delta_version, _ = match.groups()
if delta_version != str(current_schema_version):
bad_files.append(diff.b_path)
bad_delta_files.append(diff.b_path)
if not seen_deltas:
click.secho(
@@ -92,41 +111,91 @@ def main(force_colors: bool) -> None:
)
return
if not bad_files:
if bad_delta_files:
bad_delta_files.sort()
click.secho(
"Found deltas in the wrong folder!",
fg="red",
bold=True,
color=force_colors,
)
for f in bad_delta_files:
click.secho(
f"\t{f}",
fg="red",
bold=True,
color=force_colors,
)
click.secho()
click.secho(
f"Please move these files to delta/{current_schema_version}/",
fg="red",
bold=True,
color=force_colors,
)
else:
click.secho(
f"All deltas are in the correct folder: {current_schema_version}!",
fg="green",
bold=True,
color=force_colors,
)
return
bad_files.sort()
# Make sure we process them in order. This sort works because deltas are numbered
# and delta files are also numbered in order.
changed_delta_files.sort()
click.secho(
"Found deltas in the wrong folder!",
fg="red",
bold=True,
color=force_colors,
)
# Now check that we're not trying to create or drop indices. If we want to
# do that they should be in background updates. The exception is when we
# create indices on tables we've just created.
created_tables = set()
for delta_file in changed_delta_files:
with open(delta_file) as fd:
delta_lines = fd.readlines()
for f in bad_files:
click.secho(
f"\t{f}",
fg="red",
bold=True,
color=force_colors,
)
for line in delta_lines:
# Strip SQL comments
line = line.split("--", maxsplit=1)[0]
click.secho()
click.secho(
f"Please move these files to delta/{current_schema_version}/",
fg="red",
bold=True,
color=force_colors,
)
# Check and track any tables we create
match = TABLE_CREATION_REGEX.search(line)
if match:
table_name = match.group(1)
created_tables.add(table_name)
click.get_current_context().exit(1)
# Check for dropping indices, these are always banned
match = INDEX_DELETION_REGEX.search(line)
if match:
clause = match.group()
click.secho(
f"Found delta with index deletion: '{clause}' in {delta_file}\nThese should be in background updates.",
fg="red",
bold=True,
color=force_colors,
)
return_code = 1
# Check for index creation, which is only allowed for tables we've
# created.
match = INDEX_CREATION_REGEX.search(line)
if match:
clause = match.group()
table_name = match.group(1)
if table_name not in created_tables:
click.secho(
f"Found delta with index creation: '{clause}' in {delta_file}\nThese should be in background updates.",
fg="red",
bold=True,
color=force_colors,
)
return_code = 1
click.get_current_context().exit(return_code)
if __name__ == "__main__":
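To make the new index lint concrete, here is a small self-contained sketch that runs the same regexes over two hypothetical delta snippets: creating an index on a table created in the same delta passes, while creating an index on a pre-existing table or dropping any index is flagged (those belong in background updates).

```python
# Illustration of the lint's behaviour using the regexes from the script above;
# the delta contents and table names are made up for the example.
import re

INDEX_CREATION_REGEX = re.compile(r"CREATE .*INDEX .*ON ([a-z_]+)", flags=re.IGNORECASE)
INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_]+)", flags=re.IGNORECASE)
TABLE_CREATION_REGEX = re.compile(r"CREATE .*TABLE ([a-z_]+)", flags=re.IGNORECASE)

ok_delta = """
CREATE TABLE foo_bar (id BIGINT PRIMARY KEY, name TEXT);
CREATE INDEX foo_bar_name ON foo_bar (name);
"""

bad_delta = """
CREATE INDEX event_json_room ON event_json (room_id);
DROP INDEX old_unused_index;
"""

for name, sql in [("ok_delta", ok_delta), ("bad_delta", bad_delta)]:
    # Index creation is only allowed on tables created in the same delta.
    created = {m.group(1) for m in TABLE_CREATION_REGEX.finditer(sql)}
    flagged = [
        m.group()
        for m in INDEX_CREATION_REGEX.finditer(sql)
        if m.group(1) not in created
    ] + [m.group() for m in INDEX_DELETION_REGEX.finditer(sql)]
    print(name, "->", flagged or "clean")
```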
+1 -1
View File
@@ -1065,7 +1065,7 @@ class Porter:
def get_sent_table_size(txn: LoggingTransaction) -> int:
txn.execute(
"SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
"SELECT count(*) FROM sent_transactions WHERE ts >= ?", (yesterday,)
)
result = txn.fetchone()
assert result is not None
+3 -3
View File
@@ -292,9 +292,9 @@ def main() -> None:
for key in worker_config:
if key == "worker_app": # But we allow worker_app
continue
assert not key.startswith(
"worker_"
), "Main process cannot use worker_* config"
assert not key.startswith("worker_"), (
"Main process cannot use worker_* config"
)
else:
worker_pidfile = worker_config["worker_pid_file"]
worker_cache_factor = worker_config.get("synctl_cache_factor")
-1
View File
@@ -285,7 +285,6 @@ class GenericWorkerServer(HomeServer):
raise ConfigError(
"Can not using a unix socket for manhole at this time."
)
else:
logger.warning("Unsupported listener type: %s", listener.type)
-1
View File
@@ -288,7 +288,6 @@ class SynapseHomeServer(HomeServer):
raise ConfigError(
"Can not use a unix socket for manhole at this time."
)
else:
# this shouldn't happen, as the listener type should have been checked
# during parsing
+29 -4
View File
@@ -34,6 +34,22 @@ if TYPE_CHECKING:
logger = logging.getLogger("synapse.app.homeserver")
ONE_MINUTE_SECONDS = 60
ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS
MILLISECONDS_PER_SECOND = 1000
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
"""
We wait 5 minutes to send the first set of stats as the server can be quite busy the
first few minutes
"""
PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
"""
Phone home stats are sent every 3 hours
"""
# Contains the list of processes we will be monitoring
# currently either 0 or 1
_stats_process: List[Tuple[int, "resource.struct_rusage"]] = []
@@ -185,12 +201,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
# If you increase the loop period, the accuracy of user_daily_visits
# table will decrease
clock.looping_call(
hs.get_datastores().main.generate_user_daily_visits, 5 * 60 * 1000
hs.get_datastores().main.generate_user_daily_visits,
5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND,
)
# monthly active user limiting functionality
clock.looping_call(
hs.get_datastores().main.reap_monthly_active_users, 1000 * 60 * 60
hs.get_datastores().main.reap_monthly_active_users,
ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND,
)
hs.get_datastores().main.reap_monthly_active_users()
@@ -221,7 +239,12 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
if hs.config.metrics.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats)
clock.looping_call(
phone_stats_home,
PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND,
hs,
stats,
)
# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
@@ -229,4 +252,6 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
clock.call_later(5 * 60, phone_stats_home, hs, stats)
clock.call_later(
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, phone_stats_home, hs, stats
)
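A quick sanity check of the arithmetic behind the new constants (illustrative only, not part of the change): `clock.looping_call` takes milliseconds while `clock.call_later` takes seconds, so the values above work out as follows.

```python
# Quick arithmetic check for the intervals introduced above.
ONE_MINUTE_SECONDS = 60
ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS
MILLISECONDS_PER_SECOND = 1000

INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS  # 300 s
PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS                      # 10_800 s

# looping_call intervals are given in milliseconds:
assert 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND == 300_000          # user_daily_visits
assert ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND == 3_600_000              # reap_monthly_active_users
assert PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND == 10_800_000  # phone_stats_home
```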
+1 -6
View File
@@ -43,8 +43,7 @@ class SsoAttributeRequirement:
"""Object describing a single requirement for SSO attributes."""
attribute: str
# If neither value nor one_of is given, the attribute must simply exist. This is
# only true for CAS configs which use a different JSON schema than the one below.
# If neither `value` nor `one_of` is given, the attribute must simply exist.
value: Optional[str] = None
one_of: Optional[List[str]] = None
@@ -56,10 +55,6 @@ class SsoAttributeRequirement:
"one_of": {"type": "array", "items": {"type": "string"}},
},
"required": ["attribute"],
"oneOf": [
{"required": ["value"]},
{"required": ["one_of"]},
],
}
+1 -2
View File
@@ -108,8 +108,7 @@ class TlsConfig(Config):
# Raise an error if this option has been specified without any
# corresponding certificates.
raise ConfigError(
"federation_custom_ca_list specified without "
"any certificate files"
"federation_custom_ca_list specified without any certificate files"
)
certs = []
+1 -2
View File
@@ -986,8 +986,7 @@ def _check_power_levels(
if old_level == user_level:
raise AuthError(
403,
"You don't have permission to remove ops level equal "
"to your own",
"You don't have permission to remove ops level equal to your own",
)
# Check if the old and new levels are greater than the user level
+1 -1
View File
@@ -445,7 +445,7 @@ class AdminHandler:
user_id,
room,
limit,
["m.room.member", "m.room.message"],
["m.room.member", "m.room.message", "m.room.encrypted"],
)
if not event_ids:
# nothing to redact in this room
+4 -8
View File
@@ -1163,7 +1163,7 @@ class E2eKeysHandler:
devices = devices[user_id]
except SynapseError as e:
failure = _exception_to_failure(e)
failures[user_id] = {device: failure for device in signatures.keys()}
failures[user_id] = dict.fromkeys(signatures.keys(), failure)
return signature_list, failures
for device_id, device in signatures.items():
@@ -1303,7 +1303,7 @@ class E2eKeysHandler:
except SynapseError as e:
failure = _exception_to_failure(e)
for user, devicemap in signatures.items():
failures[user] = {device_id: failure for device_id in devicemap.keys()}
failures[user] = dict.fromkeys(devicemap.keys(), failure)
return signature_list, failures
for target_user, devicemap in signatures.items():
@@ -1344,9 +1344,7 @@ class E2eKeysHandler:
# other devices were signed -- mark those as failures
logger.debug("upload signature: too many devices specified")
failure = _exception_to_failure(NotFoundError("Unknown device"))
failures[target_user] = {
device: failure for device in other_devices
}
failures[target_user] = dict.fromkeys(other_devices, failure)
if user_signing_key_id in master_key.get("signatures", {}).get(
user_id, {}
@@ -1367,9 +1365,7 @@ class E2eKeysHandler:
except SynapseError as e:
failure = _exception_to_failure(e)
if device_id is None:
failures[target_user] = {
device_id: failure for device_id in devicemap.keys()
}
failures[target_user] = dict.fromkeys(devicemap.keys(), failure)
else:
failures.setdefault(target_user, {})[device_id] = failure
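The repeated change above swaps dict comprehensions that assign the same value to every key for `dict.fromkeys`; a tiny illustration of the equivalence (and the shared-value caveat), using made-up data:

```python
# dict.fromkeys(keys, value) builds the same mapping as {k: value for k in keys}.
failure = {"errcode": "M_UNKNOWN", "error": "example failure"}  # made-up value
devices = ["DEVICE_A", "DEVICE_B"]

assert {d: failure for d in devices} == dict.fromkeys(devices, failure)

# Caveat: every key shares the *same* value object, which is fine here because the
# failure dict is never mutated per-key afterwards.
```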
+3 -3
View File
@@ -1312,9 +1312,9 @@ class FederationHandler:
if state_key is not None:
# the event was not rejected (get_event raises a NotFoundError for rejected
# events) so the state at the event should include the event itself.
assert (
state_map.get((event.type, state_key)) == event.event_id
), "State at event did not include event itself"
assert state_map.get((event.type, state_key)) == event.event_id, (
"State at event did not include event itself"
)
# ... but we need the state *before* that event
if "replaces_state" in event.unsigned:
+12 -10
View File
@@ -143,9 +143,9 @@ class MessageHandler:
elif membership == Membership.LEAVE:
key = (event_type, state_key)
# If the membership is not JOIN, then the event ID should exist.
assert (
membership_event_id is not None
), "check_user_in_room_or_world_readable returned invalid data"
assert membership_event_id is not None, (
"check_user_in_room_or_world_readable returned invalid data"
)
room_state = await self._state_storage_controller.get_state_for_events(
[membership_event_id], StateFilter.from_types([key])
)
@@ -242,9 +242,9 @@ class MessageHandler:
room_state = await self.store.get_events(state_ids.values())
elif membership == Membership.LEAVE:
# If the membership is not JOIN, then the event ID should exist.
assert (
membership_event_id is not None
), "check_user_in_room_or_world_readable returned invalid data"
assert membership_event_id is not None, (
"check_user_in_room_or_world_readable returned invalid data"
)
room_state_events = (
await self._state_storage_controller.get_state_for_events(
[membership_event_id], state_filter=state_filter
@@ -1267,12 +1267,14 @@ class EventCreationHandler:
# Allow an event to have empty list of prev_event_ids
# only if it has auth_event_ids.
or auth_event_ids
), "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
), (
"Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
)
else:
# we now ought to have some prev_events (unless it's a create event).
assert (
builder.type == EventTypes.Create or prev_event_ids
), "Attempting to create a non-m.room.create event with no prev_events"
assert builder.type == EventTypes.Create or prev_event_ids, (
"Attempting to create a non-m.room.create event with no prev_events"
)
if for_batch:
assert prev_event_ids is not None
+3 -3
View File
@@ -1192,9 +1192,9 @@ class SsoHandler:
"""
# It is expected that this is the main process.
assert isinstance(
self._device_handler, DeviceHandler
), "revoking SSO sessions can only be called on the main process"
assert isinstance(self._device_handler, DeviceHandler), (
"revoking SSO sessions can only be called on the main process"
)
# Invalidate any running user-mapping sessions
to_delete = []
+3 -3
View File
@@ -426,9 +426,9 @@ class MatrixFederationHttpClient:
)
else:
proxy_authorization_secret = hs.config.worker.worker_replication_secret
assert (
proxy_authorization_secret is not None
), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
assert proxy_authorization_secret is not None, (
"`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
)
federation_proxy_credentials = BearerProxyCredentials(
proxy_authorization_secret.encode("ascii")
)
+6 -6
View File
@@ -173,9 +173,9 @@ class ProxyAgent(_AgentBase):
self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
self._federation_proxy_credentials: Optional[ProxyCredentials] = None
if federation_proxy_locations:
assert (
federation_proxy_credentials is not None
), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
assert federation_proxy_credentials is not None, (
"`federation_proxy_credentials` are required when using `federation_proxy_locations`"
)
endpoints: List[IStreamClientEndpoint] = []
for federation_proxy_location in federation_proxy_locations:
@@ -302,9 +302,9 @@ class ProxyAgent(_AgentBase):
parsed_uri.scheme == b"matrix-federation"
and self._federation_proxy_endpoint
):
assert (
self._federation_proxy_credentials is not None
), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
assert self._federation_proxy_credentials is not None, (
"`federation_proxy_credentials` are required when using `federation_proxy_locations`"
)
# Set a Proxy-Authorization header
if headers is None:
+3 -3
View File
@@ -582,9 +582,9 @@ def parse_enum(
is not one of those allowed values.
"""
# Assert the enum values are strings.
assert all(
isinstance(e.value, str) for e in E
), "parse_enum only works with string values"
assert all(isinstance(e.value, str) for e in E), (
"parse_enum only works with string values"
)
str_value = parse_string(
request,
name,
+3 -3
View File
@@ -894,9 +894,9 @@ class ModuleApi:
Raises:
synapse.api.errors.AuthError: the access token is invalid
"""
assert isinstance(
self._device_handler, DeviceHandler
), "invalidate_access_token can only be called on the main process"
assert isinstance(self._device_handler, DeviceHandler), (
"invalidate_access_token can only be called on the main process"
)
# see if the access token corresponds to a device
user_info = yield defer.ensureDeferred(
+3 -3
View File
@@ -131,9 +131,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
# We reserve `instance_name` as a parameter to sending requests, so we
# assert here that sub classes don't try and use the name.
assert (
"instance_name" not in self.PATH_ARGS
), "`instance_name` is a reserved parameter name"
assert "instance_name" not in self.PATH_ARGS, (
"`instance_name` is a reserved parameter name"
)
assert (
"instance_name"
not in signature(self.__class__._serialize_payload).parameters
+3 -3
View File
@@ -200,9 +200,9 @@ class EventsStream(_StreamFromIdGen):
# we rely on get_all_new_forward_event_rows strictly honouring the limit, so
# that we know it is safe to just take upper_limit = event_rows[-1][0].
assert (
len(event_rows) <= target_row_count
), "get_all_new_forward_event_rows did not honour row limit"
assert len(event_rows) <= target_row_count, (
"get_all_new_forward_event_rows did not honour row limit"
)
# if we hit the limit on event_updates, there's no point in going beyond the
# last stream_id in the batch for the other sources.
+1 -2
View File
@@ -207,8 +207,7 @@ class PurgeHistoryRestServlet(RestServlet):
(stream, topo, _event_id) = r
token = "t%d-%d" % (topo, stream)
logger.info(
"[purge] purging up to token %s (received_ts %i => "
"stream_ordering %i)",
"[purge] purging up to token %s (received_ts %i => stream_ordering %i)",
token,
ts,
stream_ordering,
+1
View File
@@ -150,6 +150,7 @@ class RoomRestV2Servlet(RestServlet):
def _convert_delete_task_to_response(task: ScheduledTask) -> JsonDict:
return {
"delete_id": task.id,
"room_id": task.resource_id,
"status": task.status,
"shutdown_room": task.result,
}
+1 -3
View File
@@ -39,9 +39,7 @@ logger = logging.getLogger(__name__)
class ReceiptRestServlet(RestServlet):
PATTERNS = client_patterns(
"/rooms/(?P<room_id>[^/]*)"
"/receipt/(?P<receipt_type>[^/]*)"
"/(?P<event_id>[^/]*)$"
"/rooms/(?P<room_id>[^/]*)/receipt/(?P<receipt_type>[^/]*)/(?P<event_id>[^/]*)$"
)
CATEGORY = "Receipts requests"
+3 -3
View File
@@ -44,9 +44,9 @@ class MSC4108DelegationRendezvousServlet(RestServlet):
redirection_target: Optional[str] = (
hs.config.experimental.msc4108_delegation_endpoint
)
assert (
redirection_target is not None
), "Servlet is only registered if there is a delegation target"
assert redirection_target is not None, (
"Servlet is only registered if there is a delegation target"
)
self.endpoint = redirection_target.encode("utf-8")
async def on_POST(self, request: SynapseRequest) -> None:
+3 -3
View File
@@ -94,9 +94,9 @@ class HttpTransactionCache:
# (appservice and guest users), but does not cover access tokens minted
# by the admin API. Use the access token ID instead.
else:
assert (
requester.access_token_id is not None
), "Requester must have an access_token_id"
assert requester.access_token_id is not None, (
"Requester must have an access_token_id"
)
return (path, "user_admin", requester.access_token_id)
def fetch_or_execute_request(
+6 -6
View File
@@ -739,9 +739,9 @@ class BackgroundUpdater:
c.execute(sql)
async def updater(progress: JsonDict, batch_size: int) -> int:
assert isinstance(
self.db_pool.engine, engines.PostgresEngine
), "validate constraint background update registered for non-Postres database"
assert isinstance(self.db_pool.engine, engines.PostgresEngine), (
"validate constraint background update registered for non-Postres database"
)
logger.info("Validating constraint %s to %s", constraint_name, table)
await self.db_pool.runWithConnection(runner)
@@ -900,9 +900,9 @@ class BackgroundUpdater:
on the table. Used to iterate over the table.
"""
assert isinstance(
self.db_pool.engine, engines.PostgresEngine
), "validate constraint background update registered for non-Postres database"
assert isinstance(self.db_pool.engine, engines.PostgresEngine), (
"validate constraint background update registered for non-Postres database"
)
async def updater(progress: JsonDict, batch_size: int) -> int:
return await self.validate_constraint_and_delete_in_background(
@@ -870,8 +870,7 @@ class EventsPersistenceStorageController:
# This should only happen for outlier events.
if not ev.internal_metadata.is_outlier():
raise Exception(
"Context for new event %s has no state "
"group" % (ev.event_id,)
"Context for new event %s has no state group" % (ev.event_id,)
)
continue
if ctx.state_group_deltas:
+6 -6
View File
@@ -653,9 +653,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
@wrap_as_background_process("update_client_ips")
async def _update_client_ips_batch(self) -> None:
assert (
self._update_on_this_worker
), "This worker is not designated to update client IPs"
assert self._update_on_this_worker, (
"This worker is not designated to update client IPs"
)
# If the DB pool has already terminated, don't try updating
if not self.db_pool.is_running():
@@ -674,9 +674,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke
txn: LoggingTransaction,
to_update: Mapping[Tuple[str, str, str], Tuple[str, Optional[str], int]],
) -> None:
assert (
self._update_on_this_worker
), "This worker is not designated to update client IPs"
assert self._update_on_this_worker, (
"This worker is not designated to update client IPs"
)
# Keys and values for the `user_ips` upsert.
user_ips_keys = []
@@ -203,9 +203,9 @@ class DeviceInboxWorkerStore(SQLBaseStore):
to_stream_id=to_stream_id,
)
assert (
last_processed_stream_id == to_stream_id
), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
assert last_processed_stream_id == to_stream_id, (
"Expected _get_device_messages to process all to-device messages up to `to_stream_id`"
)
return user_id_device_id_to_messages
+1 -1
View File
@@ -1096,7 +1096,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
),
)
results: Dict[str, Optional[str]] = {user_id: None for user_id in user_ids}
results: Dict[str, Optional[str]] = dict.fromkeys(user_ids)
results.update(rows)
return results
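Several hunks swap a dict comprehension with a constant value for dict.fromkeys, as in the results initialisation above. The two forms build the same mapping (fromkeys defaults the value to None, and an explicit constant can be passed), which a small standalone check shows:

user_ids = ["@alice:test", "@bob:test"]

# Comprehension form, as in the removed line.
by_hand = {user_id: None for user_id in user_ids}

# dict.fromkeys form, as in the added line; the value defaults to None.
via_fromkeys = dict.fromkeys(user_ids)

assert by_hand == via_fromkeys
# A constant value (e.g. 0 for the Kahn's-algorithm degree maps, or 50 for the
# power-level update later in this diff) can be passed explicitly.
assert dict.fromkeys(user_ids, 0) == {u: 0 for u in user_ids}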
+13 -14
View File
@@ -240,9 +240,9 @@ class PersistEventsStore:
self.is_mine_id = hs.is_mine_id
# This should only exist on instances that are configured to write
assert (
hs.get_instance_name() in hs.config.worker.writers.events
), "Can only instantiate EventsStore on master"
assert hs.get_instance_name() in hs.config.worker.writers.events, (
"Can only instantiate EventsStore on master"
)
# Since we have been configured to write, we ought to have id generators,
# rather than id trackers.
@@ -471,9 +471,9 @@ class PersistEventsStore:
missing_membership_event_ids
)
# There shouldn't be any missing events
assert (
remaining_events.keys() == missing_membership_event_ids
), missing_membership_event_ids.difference(remaining_events.keys())
assert remaining_events.keys() == missing_membership_event_ids, (
missing_membership_event_ids.difference(remaining_events.keys())
)
membership_event_map.update(remaining_events)
for (
@@ -540,9 +540,9 @@ class PersistEventsStore:
missing_state_event_ids
)
# There shouldn't be any missing events
assert (
remaining_events.keys() == missing_state_event_ids
), missing_state_event_ids.difference(remaining_events.keys())
assert remaining_events.keys() == missing_state_event_ids, (
missing_state_event_ids.difference(remaining_events.keys())
)
for event in remaining_events.values():
current_state_map[(event.type, event.state_key)] = event
@@ -650,9 +650,9 @@ class PersistEventsStore:
if missing_event_ids:
remaining_events = await self.store.get_events(missing_event_ids)
# There shouldn't be any missing events
assert (
remaining_events.keys() == missing_event_ids
), missing_event_ids.difference(remaining_events.keys())
assert remaining_events.keys() == missing_event_ids, (
missing_event_ids.difference(remaining_events.keys())
)
for event in remaining_events.values():
current_state_map[(event.type, event.state_key)] = event
@@ -3454,8 +3454,7 @@ class PersistEventsStore:
# Delete all these events that we've already fetched and now know that their
# prev events are the new backwards extremities.
query = (
"DELETE FROM event_backward_extremities"
" WHERE event_id = ? AND room_id = ?"
"DELETE FROM event_backward_extremities WHERE event_id = ? AND room_id = ?"
)
backward_extremity_tuples_to_remove = [
(ev.event_id, ev.room_id)
@@ -24,7 +24,12 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
import attr
from synapse.api.constants import EventContentFields, Membership, RelationTypes
from synapse.api.constants import (
MAX_DEPTH,
EventContentFields,
Membership,
RelationTypes,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase, make_event_from_dict
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -311,6 +316,10 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update,
)
self.db_pool.updates.register_background_update_handler(
_BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, self.fixup_max_depth_cap_bg_update
)
# We want this to run on the main database at startup before we start processing
# events.
#
@@ -2547,6 +2556,77 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
return num_rows
async def fixup_max_depth_cap_bg_update(
self, progress: JsonDict, batch_size: int
) -> int:
"""Fixes the topological ordering for events that have a depth greater
than MAX_DEPTH. This should fix /messages ordering oddities."""
room_id_bound = progress.get("room_id", "")
def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> Tuple[bool, int]:
txn.execute(
"""
SELECT room_id, room_version FROM rooms
WHERE room_id > ?
ORDER BY room_id
LIMIT ?
""",
(room_id_bound, batch_size),
)
# Find the next room ID to process, with a relevant room version.
room_ids: List[str] = []
max_room_id: Optional[str] = None
for room_id, room_version_str in txn:
max_room_id = room_id
# We only want to process rooms with a known room version that
# has strict canonical json validation enabled.
room_version = KNOWN_ROOM_VERSIONS.get(room_version_str)
if room_version and room_version.strict_canonicaljson:
room_ids.append(room_id)
if max_room_id is None:
# The query did not return any rooms, so we are done.
return True, 0
# Update the progress to the last room ID we pulled from the DB,
# this ensures we always make progress.
self.db_pool.updates._background_update_progress_txn(
txn,
_BackgroundUpdates.FIXUP_MAX_DEPTH_CAP,
progress={"room_id": max_room_id},
)
if not room_ids:
# There were no rooms in this batch that required the fix.
return False, 0
clause, list_args = make_in_list_sql_clause(
self.database_engine, "room_id", room_ids
)
sql = f"""
UPDATE events SET topological_ordering = ?
WHERE topological_ordering > ? AND {clause}
"""
args = [MAX_DEPTH, MAX_DEPTH]
args.extend(list_args)
txn.execute(sql, args)
return False, len(room_ids)
done, num_rooms = await self.db_pool.runInteraction(
"redo_max_depth_bg_update", redo_max_depth_bg_update_txn
)
if done:
await self.db_pool.updates._end_background_update(
_BackgroundUpdates.FIXUP_MAX_DEPTH_CAP
)
return num_rooms
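As a rough aid to reading fixup_max_depth_cap_bg_update above, the sketch below isolates the batching pattern it relies on: each pass resumes from the last room ID pulled from the database, so progress is recorded even when a batch contains no rooms that need fixing. This is a toy model with invented names, not Synapse's API:

from typing import Dict, List, Tuple

def run_batch(
    rooms: List[str], progress: Dict[str, str], batch_size: int
) -> Tuple[bool, List[str]]:
    # Resume from the last room ID recorded in the progress dict.
    bound = progress.get("room_id", "")
    batch = sorted(r for r in rooms if r > bound)[:batch_size]
    if not batch:
        # No rooms left above the bound: the update is finished.
        return True, []
    # Always advance the bound, even if nothing in the batch needed fixing.
    progress["room_id"] = batch[-1]
    return False, batch  # the real update would cap topological_ordering here

rooms = ["!a:test", "!b:test", "!c:test"]
progress: Dict[str, str] = {}
assert run_batch(rooms, progress, batch_size=2) == (False, ["!a:test", "!b:test"])
assert run_batch(rooms, progress, batch_size=2) == (False, ["!c:test"])
assert run_batch(rooms, progress, batch_size=2) == (True, [])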
def _resolve_stale_data_in_sliding_sync_tables(
txn: LoggingTransaction,
@@ -827,9 +827,9 @@ class EventsWorkerStore(SQLBaseStore):
if missing_events_ids:
async def get_missing_events_from_cache_or_db() -> (
Dict[str, EventCacheEntry]
):
async def get_missing_events_from_cache_or_db() -> Dict[
str, EventCacheEntry
]:
"""Fetches the events in `missing_event_ids` from the database.
Also creates entries in `self._current_event_fetches` to allow
@@ -304,9 +304,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
txn:
threepids: List of threepid dicts to reserve
"""
assert (
self._update_on_this_worker
), "This worker is not designated to update MAUs"
assert self._update_on_this_worker, (
"This worker is not designated to update MAUs"
)
# XXX what is this function trying to achieve? It upserts into
# monthly_active_users for each *registered* reserved mau user, but why?
@@ -340,9 +340,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
Args:
user_id: user to add/update
"""
assert (
self._update_on_this_worker
), "This worker is not designated to update MAUs"
assert self._update_on_this_worker, (
"This worker is not designated to update MAUs"
)
# Support user never to be included in MAU stats. Note I can't easily call this
# from upsert_monthly_active_user_txn because then I need a _txn form of
@@ -379,9 +379,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
txn:
user_id: user to add/update
"""
assert (
self._update_on_this_worker
), "This worker is not designated to update MAUs"
assert self._update_on_this_worker, (
"This worker is not designated to update MAUs"
)
# Am consciously deciding to lock the table on the basis that it ought
# never be a big table and alternative approaches (batching multiple
@@ -409,9 +409,9 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
Args:
user_id: the user_id to query
"""
assert (
self._update_on_this_worker
), "This worker is not designated to update MAUs"
assert self._update_on_this_worker, (
"This worker is not designated to update MAUs"
)
if self._limit_usage_by_mau or self._mau_stats_only:
# Trial users and guests should not be included as part of MAU group
@@ -199,8 +199,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# Update backward extremities
txn.execute_batch(
"INSERT INTO event_backward_extremities (room_id, event_id)"
" VALUES (?, ?)",
"INSERT INTO event_backward_extremities (room_id, event_id) VALUES (?, ?)",
[(room_id, event_id) for (event_id,) in new_backwards_extrems],
)
@@ -68,6 +68,14 @@ class SlidingSyncStore(SQLBaseStore):
columns=("membership_event_id",),
)
self.db_pool.updates.register_background_index_update(
update_name="sliding_sync_membership_snapshots_user_id_stream_ordering",
index_name="sliding_sync_membership_snapshots_user_id_stream_ordering",
table="sliding_sync_membership_snapshots",
columns=("user_id", "event_stream_ordering"),
replaces_index="sliding_sync_membership_snapshots_user_id",
)
async def get_latest_bump_stamp_for_room(
self,
room_id: str,
@@ -98,9 +98,9 @@ class StateDeltasStore(SQLBaseStore):
prev_stream_id = int(prev_stream_id)
# check we're not going backwards
assert (
prev_stream_id <= max_stream_id
), f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}"
assert prev_stream_id <= max_stream_id, (
f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}"
)
if not self._curr_state_delta_stream_cache.has_any_entity_changed(
prev_stream_id
+1 -4
View File
@@ -274,10 +274,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
def remove_tag_txn(txn: LoggingTransaction, next_id: int) -> None:
sql = (
"DELETE FROM room_tags "
" WHERE user_id = ? AND room_id = ? AND tag = ?"
)
sql = "DELETE FROM room_tags WHERE user_id = ? AND room_id = ? AND tag = ?"
txn.execute(sql, (user_id, room_id, tag))
self._update_revision_txn(txn, user_id, room_id, next_id)
@@ -582,9 +582,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
retry_counter: number of failures in refreshing the profile so far. Used for
exponential backoff calculations.
"""
assert not self.hs.is_mine_id(
user_id
), "Can't mark a local user as a stale remote user."
assert not self.hs.is_mine_id(user_id), (
"Can't mark a local user as a stale remote user."
)
server_name = UserID.from_string(user_id).domain
@@ -396,8 +396,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
return True, count
txn.execute(
"SELECT state_group FROM state_group_edges"
" WHERE state_group = ?",
"SELECT state_group FROM state_group_edges WHERE state_group = ?",
(state_group,),
)
+1 -2
View File
@@ -75,8 +75,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
progress_json = json.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
" VALUES (?, ?)"
"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
)
cur.execute(sql, ("event_search", progress_json))
+1 -2
View File
@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
progress_json = json.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
" VALUES (?, ?)"
"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
)
cur.execute(sql, ("event_origin_server_ts", progress_json))
@@ -59,8 +59,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
progress_json = json.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
" VALUES (?, ?)"
"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
)
cur.execute(sql, ("event_search_order", progress_json))
@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) ->
progress_json = json.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
" VALUES (?, ?)"
"INSERT into background_updates (update_name, progress_json) VALUES (?, ?)"
)
cur.execute(sql, ("event_fields_sender_url", progress_json))
@@ -12,5 +12,5 @@
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- So we can fetch all rooms for a given user sorted by stream order
DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id;
CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id, event_stream_ordering);
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(9204, 'sliding_sync_membership_snapshots_user_id_stream_ordering', '{}');
@@ -0,0 +1,17 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2025 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Background update that fixes any events with a topological ordering above the
-- MAX_DEPTH value.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(9205, 'fixup_max_depth_cap', '{}');
+1 -2
View File
@@ -889,8 +889,7 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
def __str__(self) -> str:
instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
return (
f"MultiWriterStreamToken(stream: {self.stream}, "
f"instances: {{{instances}}})"
f"MultiWriterStreamToken(stream: {self.stream}, instances: {{{instances}}})"
)
+1 -1
View File
@@ -462,7 +462,7 @@ class StateFilter:
new_types.update({state_type: set() for state_type in minus_wildcards})
# insert the plus wildcards
new_types.update({state_type: None for state_type in plus_wildcards})
new_types.update(dict.fromkeys(plus_wildcards))
# insert the specific state keys
for state_type, state_key in plus_state_keys:
+2
View File
@@ -52,3 +52,5 @@ class _BackgroundUpdates:
MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = (
"mark_unreferenced_state_groups_for_deletion_bg_update"
)
FIXUP_MAX_DEPTH_CAP = "fixup_max_depth_cap"
+2 -2
View File
@@ -114,7 +114,7 @@ def sorted_topologically(
# This is implemented by Kahn's algorithm.
degree_map = {node: 0 for node in nodes}
degree_map = dict.fromkeys(nodes, 0)
reverse_graph: Dict[T, Set[T]] = {}
for node, edges in graph.items():
@@ -164,7 +164,7 @@ def sorted_topologically_batched(
persisted.
"""
degree_map = {node: 0 for node in nodes}
degree_map = dict.fromkeys(nodes, 0)
reverse_graph: Dict[T, Set[T]] = {}
for node, edges in graph.items():
@@ -65,20 +65,20 @@ def required_state_json_to_state_map(required_state: Any) -> StateMap[EventBase]
if isinstance(required_state, list):
for state_event_dict in required_state:
# Yell because we're in a test and this is unexpected
assert isinstance(
state_event_dict, dict
), "`required_state` should be a list of event dicts"
assert isinstance(state_event_dict, dict), (
"`required_state` should be a list of event dicts"
)
event_type = state_event_dict["type"]
event_state_key = state_event_dict["state_key"]
# Yell because we're in a test and this is unexpected
assert isinstance(
event_type, str
), "Each event in `required_state` should have a string `type`"
assert isinstance(
event_state_key, str
), "Each event in `required_state` should have a string `state_key`"
assert isinstance(event_type, str), (
"Each event in `required_state` should have a string `type`"
)
assert isinstance(event_state_key, str), (
"Each event in `required_state` should have a string `state_key`"
)
state_map[(event_type, event_state_key)] = make_event_from_dict(
state_event_dict
+76 -1
View File
@@ -1453,7 +1453,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
}
}
)
def test_attribute_requirements_one_of(self) -> None:
def test_attribute_requirements_one_of_succeeds(self) -> None:
"""Test that auth succeeds if userinfo attribute has multiple values and CONTAINS required value"""
# userinfo with "test": ["bar"] attribute should succeed.
userinfo = {
@@ -1475,6 +1475,81 @@ class OidcHandlerTestCase(HomeserverTestCase):
auth_provider_session_id=None,
)
@override_config(
{
"oidc_config": {
**DEFAULT_CONFIG,
"attribute_requirements": [
{"attribute": "test", "one_of": ["foo", "bar"]}
],
}
}
)
def test_attribute_requirements_one_of_fails(self) -> None:
"""Test that auth fails if userinfo attribute has multiple values yet
DOES NOT CONTAIN a required value
"""
# userinfo with "test": ["something else"] attribute should fail.
userinfo = {
"sub": "tester",
"username": "tester",
"test": ["something else"],
}
request, _ = self.start_authorization(userinfo)
self.get_success(self.handler.handle_oidc_callback(request))
self.complete_sso_login.assert_not_called()
@override_config(
{
"oidc_config": {
**DEFAULT_CONFIG,
"attribute_requirements": [{"attribute": "test"}],
}
}
)
def test_attribute_requirements_does_not_exist(self) -> None:
"""OIDC login fails if the required attribute does not exist in the OIDC userinfo response."""
# userinfo lacking "test" attribute should fail.
userinfo = {
"sub": "tester",
"username": "tester",
}
request, _ = self.start_authorization(userinfo)
self.get_success(self.handler.handle_oidc_callback(request))
self.complete_sso_login.assert_not_called()
@override_config(
{
"oidc_config": {
**DEFAULT_CONFIG,
"attribute_requirements": [{"attribute": "test"}],
}
}
)
def test_attribute_requirements_exist(self) -> None:
"""OIDC login succeeds if the required attribute exist (regardless of value)
in the OIDC userinfo response.
"""
# userinfo with "test" attribute and random value should succeed.
userinfo = {
"sub": "tester",
"username": "tester",
"test": random_string(5), # value does not matter
}
request, _ = self.start_authorization(userinfo)
self.get_success(self.handler.handle_oidc_callback(request))
# check that the auth handler got called as expected
self.complete_sso_login.assert_called_once_with(
"@tester:test",
self.provider.idp_id,
request,
ANY,
None,
new_user=True,
auth_provider_session_id=None,
)
@override_config(
{
"oidc_config": {
+2 -2
View File
@@ -1178,10 +1178,10 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
for use_numeric in [False, True]:
if use_numeric:
prefix1 = f"{i}"
prefix2 = f"{i+1}"
prefix2 = f"{i + 1}"
else:
prefix1 = f"a{i}"
prefix2 = f"a{i+1}"
prefix2 = f"a{i + 1}"
local_user_1 = self.register_user(f"user{char}{prefix1}", "password")
local_user_2 = self.register_user(f"user{char}{prefix2}", "password")
+2 -6
View File
@@ -436,8 +436,7 @@ class FederationClientTests(HomeserverTestCase):
# Send it the HTTP response
client.dataReceived(
b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
b"Server: Fake\r\n\r\n"
b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nServer: Fake\r\n\r\n"
)
# Push by enough to time it out
@@ -691,10 +690,7 @@ class FederationClientTests(HomeserverTestCase):
# Send it a huge HTTP response
protocol.dataReceived(
b"HTTP/1.1 200 OK\r\n"
b"Server: Fake\r\n"
b"Content-Type: application/json\r\n"
b"\r\n"
b"HTTP/1.1 200 OK\r\nServer: Fake\r\nContent-Type: application/json\r\n\r\n"
)
self.pump()
+1 -3
View File
@@ -250,9 +250,7 @@ small_cmyk_jpeg = TestImage(
)
small_lossless_webp = TestImage(
unhexlify(
b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"
),
unhexlify(b"524946461a000000574542505650384c0d0000002f00000010071011118888fe0700"),
b"image/webp",
b".webp",
)
+263
View File
@@ -0,0 +1,263 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
import logging
from unittest.mock import AsyncMock
from twisted.test.proto_helpers import MemoryReactor
from synapse.app.phone_stats_home import (
PHONE_HOME_INTERVAL_SECONDS,
start_phone_stats_home,
)
from synapse.rest import admin, login, register, room
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
from tests import unittest
from tests.server import ThreadedMemoryReactorClock
TEST_REPORT_STATS_ENDPOINT = "https://fake.endpoint/stats"
TEST_SERVER_CONTEXT = "test-server-context"
class PhoneHomeStatsTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
register.register_servlets,
login.register_servlets,
]
def make_homeserver(
self, reactor: ThreadedMemoryReactorClock, clock: Clock
) -> HomeServer:
# Configure the homeserver to enable stats reporting.
config = self.default_config()
config["report_stats"] = True
config["report_stats_endpoint"] = TEST_REPORT_STATS_ENDPOINT
# Configure the server context so we can check it ends up being reported
config["server_context"] = TEST_SERVER_CONTEXT
# Allow guests to be registered
config["allow_guest_access"] = True
hs = self.setup_test_homeserver(config=config)
# Replace the proxied http client with a mock, so we can inspect outbound requests to
# the configured stats endpoint.
self.put_json_mock = AsyncMock(return_value={})
hs.get_proxied_http_client().put_json = self.put_json_mock # type: ignore[method-assign]
return hs
def prepare(
self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
) -> None:
self.store = homeserver.get_datastores().main
# Wait for the background updates to add the database triggers that keep the
# `event_stats` table up-to-date.
self.wait_for_background_updates()
# Force stats reporting to occur
start_phone_stats_home(hs=homeserver)
super().prepare(reactor, clock, homeserver)
def _get_latest_phone_home_stats(self) -> JsonDict:
# Wait for `phone_stats_home` to be called again + a healthy margin (50s).
self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50)
# Extract the reported stats from our http client mock
mock_calls = self.put_json_mock.call_args_list
report_stats_calls = []
for call in mock_calls:
if call.args[0] == TEST_REPORT_STATS_ENDPOINT:
report_stats_calls.append(call)
self.assertGreaterEqual(
(len(report_stats_calls)),
1,
"Expected at-least one call to the report_stats endpoint",
)
# Extract the phone home stats from the call
phone_home_stats = report_stats_calls[0].args[1]
return phone_home_stats
def _perform_user_actions(self) -> None:
"""
Perform some actions on the homeserver that would bump the phone home
stats.
This creates a few users (including a guest), a room, and sends some messages.
Expected number of events:
- 10 unencrypted messages
- 5 encrypted messages
- 24 total events (including room state, etc)
"""
# Create some users
user_1_mxid = self.register_user(
username="test_user_1",
password="test",
)
user_2_mxid = self.register_user(
username="test_user_2",
password="test",
)
# Note: `self.register_user` does not support guest registration, and updating the
# Admin API it calls to add a new parameter would cause the `mac` parameter to fail
# in a backwards-incompatible manner. Hence, we make a manual request here.
_guest_user_mxid = self.make_request(
method="POST",
path="/_matrix/client/v3/register?kind=guest",
content={
"username": "guest_user",
"password": "test",
},
shorthand=False,
)
# Log in to each user
user_1_token = self.login(username=user_1_mxid, password="test")
user_2_token = self.login(username=user_2_mxid, password="test")
# Create a room between the two users
room_1_id = self.helper.create_room_as(
is_public=False,
tok=user_1_token,
)
# Mark this room as end-to-end encrypted
self.helper.send_state(
room_id=room_1_id,
event_type="m.room.encryption",
body={
"algorithm": "m.megolm.v1.aes-sha2",
"rotation_period_ms": 604800000,
"rotation_period_msgs": 100,
},
state_key="",
tok=user_1_token,
)
# User 1 invites user 2
self.helper.invite(
room=room_1_id,
src=user_1_mxid,
targ=user_2_mxid,
tok=user_1_token,
)
# User 2 joins
self.helper.join(
room=room_1_id,
user=user_2_mxid,
tok=user_2_token,
)
# User 1 sends 10 unencrypted messages
for _ in range(10):
self.helper.send(
room_id=room_1_id,
body="Zoinks Scoob! A message!",
tok=user_1_token,
)
# User 2 sends 5 encrypted "messages"
for _ in range(5):
self.helper.send_event(
room_id=room_1_id,
type="m.room.encrypted",
content={
"algorithm": "m.olm.v1.curve25519-aes-sha2",
"sender_key": "some_key",
"ciphertext": {
"some_key": {
"type": 0,
"body": "encrypted_payload",
},
},
},
tok=user_2_token,
)
def test_phone_home_stats(self) -> None:
"""
Test that the phone home stats contain the stats we expect based on
the scenario carried out in `_perform_user_actions`
"""
# Do things to bump the stats
self._perform_user_actions()
# Wait for the stats to be reported
phone_home_stats = self._get_latest_phone_home_stats()
self.assertEqual(
phone_home_stats["homeserver"], self.hs.config.server.server_name
)
self.assertTrue(isinstance(phone_home_stats["memory_rss"], int))
self.assertTrue(isinstance(phone_home_stats["cpu_average"], int))
self.assertEqual(phone_home_stats["server_context"], TEST_SERVER_CONTEXT)
self.assertTrue(isinstance(phone_home_stats["timestamp"], int))
self.assertTrue(isinstance(phone_home_stats["uptime_seconds"], int))
self.assertTrue(isinstance(phone_home_stats["python_version"], str))
# We expect only our test users to exist on the homeserver
self.assertEqual(phone_home_stats["total_users"], 3)
self.assertEqual(phone_home_stats["total_nonbridged_users"], 3)
self.assertEqual(phone_home_stats["daily_user_type_native"], 2)
self.assertEqual(phone_home_stats["daily_user_type_guest"], 1)
self.assertEqual(phone_home_stats["daily_user_type_bridged"], 0)
self.assertEqual(phone_home_stats["total_room_count"], 1)
self.assertEqual(phone_home_stats["daily_active_users"], 2)
self.assertEqual(phone_home_stats["monthly_active_users"], 2)
self.assertEqual(phone_home_stats["daily_active_rooms"], 1)
self.assertEqual(phone_home_stats["daily_active_e2ee_rooms"], 1)
self.assertEqual(phone_home_stats["daily_messages"], 10)
self.assertEqual(phone_home_stats["daily_e2ee_messages"], 5)
self.assertEqual(phone_home_stats["daily_sent_messages"], 10)
self.assertEqual(phone_home_stats["daily_sent_e2ee_messages"], 5)
# Our users have not been around for >30 days, hence these are all 0.
self.assertEqual(phone_home_stats["r30v2_users_all"], 0)
self.assertEqual(phone_home_stats["r30v2_users_android"], 0)
self.assertEqual(phone_home_stats["r30v2_users_ios"], 0)
self.assertEqual(phone_home_stats["r30v2_users_electron"], 0)
self.assertEqual(phone_home_stats["r30v2_users_web"], 0)
self.assertEqual(
phone_home_stats["cache_factor"], self.hs.config.caches.global_factor
)
self.assertEqual(
phone_home_stats["event_cache_size"],
self.hs.config.caches.event_cache_size,
)
self.assertEqual(
phone_home_stats["database_engine"],
self.hs.config.database.databases[0].config["name"],
)
self.assertEqual(
phone_home_stats["database_server_version"],
self.hs.get_datastores().main.database_engine.server_version,
)
synapse_logger = logging.getLogger("synapse")
log_level = synapse_logger.getEffectiveLevel()
self.assertEqual(phone_home_stats["log_level"], logging.getLevelName(log_level))
+1 -1
View File
@@ -324,7 +324,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
pls = self.helper.get_state(
self.room_id, EventTypes.PowerLevels, tok=self.user_tok
)
pls["users"].update({u: 50 for u in user_ids})
pls["users"].update(dict.fromkeys(user_ids, 50))
self.helper.send_state(
self.room_id,
EventTypes.PowerLevels,
+8 -1
View File
@@ -758,6 +758,8 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.assertEqual(2, len(channel.json_body["results"]))
self.assertEqual("complete", channel.json_body["results"][0]["status"])
self.assertEqual("complete", channel.json_body["results"][1]["status"])
self.assertEqual(self.room_id, channel.json_body["results"][0]["room_id"])
self.assertEqual(self.room_id, channel.json_body["results"][1]["room_id"])
delete_ids = {delete_id1, delete_id2}
self.assertTrue(channel.json_body["results"][0]["delete_id"] in delete_ids)
delete_ids.remove(channel.json_body["results"][0]["delete_id"])
@@ -777,6 +779,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.assertEqual(1, len(channel.json_body["results"]))
self.assertEqual("complete", channel.json_body["results"][0]["status"])
self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"])
self.assertEqual(self.room_id, channel.json_body["results"][0]["room_id"])
# get status after more than clearing time for all tasks
self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)
@@ -1237,6 +1240,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.assertEqual(
delete_id, channel_room_id.json_body["results"][0]["delete_id"]
)
self.assertEqual(
self.room_id, channel_room_id.json_body["results"][0]["room_id"]
)
# get information by delete_id
channel_delete_id = self.make_request(
@@ -1249,6 +1255,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
channel_delete_id.code,
msg=channel_delete_id.json_body,
)
self.assertEqual(self.room_id, channel_delete_id.json_body["room_id"])
# test values that are the same in both responses
for content in [
@@ -1312,7 +1319,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
# Check that response json body contains a "rooms" key
self.assertTrue(
"rooms" in channel.json_body,
msg="Response body does not " "contain a 'rooms' key",
msg="Response body does not contain a 'rooms' key",
)
# Check that 3 rooms were returned
+55 -4
View File
@@ -36,7 +36,13 @@ from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
from synapse.api.constants import ApprovalNoticeMedium, EventTypes, LoginType, UserTypes
from synapse.api.constants import (
ApprovalNoticeMedium,
EventContentFields,
EventTypes,
LoginType,
UserTypes,
)
from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
from synapse.api.room_versions import RoomVersions
from synapse.media.filepath import MediaFilePaths
@@ -3895,9 +3901,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
image_data1 = SMALL_PNG
# Resolution: 1×1, MIME type: image/gif, Extension: gif, Size: 35 B
image_data2 = unhexlify(
b"47494638376101000100800100000000"
b"ffffff2c00000000010001000002024c"
b"01003b"
b"47494638376101000100800100000000ffffff2c00000000010001000002024c01003b"
)
# Resolution: 1×1, MIME type: image/bmp, Extension: bmp, Size: 54 B
image_data3 = unhexlify(
@@ -5467,6 +5471,53 @@ class UserRedactionTestCase(unittest.HomeserverTestCase):
# we originally sent 5 messages so 5 should be redacted
self.assertEqual(len(original_message_ids), 0)
def test_redact_redacts_encrypted_messages(self) -> None:
"""
Test that user's encrypted messages are redacted
"""
encrypted_room = self.helper.create_room_as(
self.admin, tok=self.admin_tok, room_version="7"
)
self.helper.send_state(
encrypted_room,
EventTypes.RoomEncryption,
{EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
tok=self.admin_tok,
)
# join the room and send some messages
originals = []
join = self.helper.join(encrypted_room, self.bad_user, tok=self.bad_user_tok)
originals.append(join["event_id"])
for _ in range(15):
res = self.helper.send_event(
encrypted_room, "m.room.encrypted", {}, tok=self.bad_user_tok
)
originals.append(res["event_id"])
# redact user's events
channel = self.make_request(
"POST",
f"/_synapse/admin/v1/user/{self.bad_user}/redact",
content={"rooms": []},
access_token=self.admin_tok,
)
self.assertEqual(channel.code, 200)
matched = []
filter = json.dumps({"types": [EventTypes.Redaction]})
channel = self.make_request(
"GET",
f"rooms/{encrypted_room}/messages?filter={filter}&limit=50",
access_token=self.admin_tok,
)
self.assertEqual(channel.code, 200)
for event in channel.json_body["chunk"]:
for event_id in originals:
if event["type"] == "m.room.redaction" and event["redacts"] == event_id:
matched.append(event_id)
self.assertEqual(len(matched), len(originals))
class UserRedactionBackgroundTaskTestCase(BaseMultiWorkerStreamTestCase):
servlets = [
@@ -309,8 +309,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
self.assertEqual(
response_body["rooms"][room_id1]["limited"],
False,
f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
+ f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
f"Our `timeline_limit` was {sync_body['lists']['foo-list']['timeline_limit']} "
+ f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. "
+ str(response_body["rooms"][room_id1]),
)
# Check to make sure the latest events are returned
@@ -387,7 +387,7 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase):
response_body["rooms"][room_id1]["limited"],
True,
f"Our `timeline_limit` was {timeline_limit} "
+ f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. '
+ f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. "
+ str(response_body["rooms"][room_id1]),
)
# Check to make sure that the "live" and historical events are returned
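The f-string hunks above only change which quote style is used inside the replacement fields: with double quotes on the outside, the dictionary subscripts switch to single quotes. Both spellings are valid and render the same text, as this standalone check shows (with a made-up sync_body value):

sync_body = {"lists": {"foo-list": {"timeline_limit": 3}}}

old_style = f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} '
new_style = f"Our `timeline_limit` was {sync_body['lists']['foo-list']['timeline_limit']} "

# Only the quoting differs; the rendered strings are identical.
assert old_style == new_style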
+1 -1
View File
@@ -1006,7 +1006,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
data = base64.b64encode(SMALL_PNG)
end_content = (
b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
b'<html><head><img src="data:image/png;base64,%s" /></head></html>'
) % (data,)
channel = self.make_request(
+3 -3
View File
@@ -716,9 +716,9 @@ class RestHelper:
"/login",
content={"type": "m.login.token", "token": login_token},
)
assert (
channel.code == expected_status
), f"unexpected status in response: {channel.code}"
assert channel.code == expected_status, (
f"unexpected status in response: {channel.code}"
)
return channel.json_body
def auth_via_oidc(
+1 -1
View File
@@ -878,7 +878,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
data = base64.b64encode(SMALL_PNG)
end_content = (
b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>"
b'<html><head><img src="data:image/png;base64,%s" /></head></html>'
) % (data,)
channel = self.make_request(
+3 -3
View File
@@ -225,9 +225,9 @@ class FakeChannel:
new_headers.addRawHeader(k, v)
headers = new_headers
assert isinstance(
headers, Headers
), f"headers are of the wrong type: {headers!r}"
assert isinstance(headers, Headers), (
f"headers are of the wrong type: {headers!r}"
)
self.result["headers"] = headers
+1 -1
View File
@@ -349,7 +349,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
self.mock_txn.execute.assert_called_once_with(
"UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?",
"UPDATE tablename SET colC = ?, colD = ? WHERE colA = ? AND colB = ?",
[3, 4, 1, 2],
)
+3 -3
View File
@@ -211,9 +211,9 @@ class DeviceStoreTestCase(HomeserverTestCase):
even if that means leaving an earlier batch one EDU short of the limit.
"""
assert self.hs.is_mine_id(
"@user_id:test"
), "Test not valid: this MXID should be considered local"
assert self.hs.is_mine_id("@user_id:test"), (
"Test not valid: this MXID should be considered local"
)
self.get_success(
self.store.set_e2e_cross_signing_key(
+1 -1
View File
@@ -114,7 +114,7 @@ def get_all_topologically_sorted_orders(
# This is implemented by Kahn's algorithm, and forking execution each time
# we have a choice over which node to consider next.
degree_map = {node: 0 for node in nodes}
degree_map = dict.fromkeys(nodes, 0)
reverse_graph: Dict[T, Set[T]] = {}
for node, edges in graph.items():
+157
View File
@@ -0,0 +1,157 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
#
from typing import Dict
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import MAX_DEPTH
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.server import HomeServer
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase):
"""Test the background update that caps topological_ordering at MAX_DEPTH."""
def prepare(
self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
) -> None:
self.store = self.hs.get_datastores().main
self.db_pool = self.store.db_pool
self.room_id = "!testroom:example.com"
# Reinsert the background update as it was already run at the start of
# the test.
self.get_success(
self.db_pool.simple_insert(
table="background_updates",
values={
"update_name": "fixup_max_depth_cap",
"progress_json": "{}",
},
)
)
def create_room(self, room_version: RoomVersion) -> Dict[str, int]:
"""Create a room with a known room version and insert events.
Returns a map of the inserted event IDs to their depths; some of the
depths exceed MAX_DEPTH.
"""
# Create a room with a specific room version
self.get_success(
self.db_pool.simple_insert(
table="rooms",
values={
"room_id": self.room_id,
"room_version": room_version.identifier,
},
)
)
# Insert events with some depths exceeding MAX_DEPTH
event_id_to_depth: Dict[str, int] = {}
for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5):
event_id = f"$event{depth}:example.com"
event_id_to_depth[event_id] = depth
self.get_success(
self.db_pool.simple_insert(
table="events",
values={
"event_id": event_id,
"room_id": self.room_id,
"topological_ordering": depth,
"depth": depth,
"type": "m.test",
"sender": "@user:test",
"processed": True,
"outlier": False,
},
)
)
return event_id_to_depth
def test_fixup_max_depth_cap_bg_update(self) -> None:
"""Test that the background update correctly caps topological_ordering
at MAX_DEPTH."""
event_id_to_depth = self.create_room(RoomVersions.V6)
# Run the background update
progress = {"room_id": ""}
batch_size = 10
num_rooms = self.get_success(
self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
)
# Verify the number of rooms processed
self.assertEqual(num_rooms, 1)
# Verify that the topological_ordering of events has been capped at
# MAX_DEPTH
rows = self.get_success(
self.db_pool.simple_select_list(
table="events",
keyvalues={"room_id": self.room_id},
retcols=["event_id", "topological_ordering"],
)
)
for event_id, topological_ordering in rows:
if event_id_to_depth[event_id] >= MAX_DEPTH:
# Events with a depth greater than or equal to MAX_DEPTH should
# be capped at MAX_DEPTH.
self.assertEqual(topological_ordering, MAX_DEPTH)
else:
# Events with a depth less than MAX_DEPTH should remain
# unchanged.
self.assertEqual(topological_ordering, event_id_to_depth[event_id])
def test_fixup_max_depth_cap_bg_update_old_room_version(self) -> None:
"""Test that the background update does not cap topological_ordering for
rooms with old room versions."""
event_id_to_depth = self.create_room(RoomVersions.V5)
# Run the background update
progress = {"room_id": ""}
batch_size = 10
num_rooms = self.get_success(
self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
)
# Verify the number of rooms processed
self.assertEqual(num_rooms, 0)
# Fetch the stored topological_ordering values so we can check they were
# left untouched for this room version
rows = self.get_success(
self.db_pool.simple_select_list(
table="events",
keyvalues={"room_id": self.room_id},
retcols=["event_id", "topological_ordering"],
)
)
# Assert that the topological_ordering of events has not been changed
# from their depth.
self.assertDictEqual(event_id_to_depth, dict(rows))
+1 -1
View File
@@ -149,7 +149,7 @@ class _DummyStore:
async def get_partial_state_events(
self, event_ids: Collection[str]
) -> Dict[str, bool]:
return {e: False for e in event_ids}
return dict.fromkeys(event_ids, False)
async def get_state_group_delta(
self, name: str
+1 -1
View File
@@ -48,7 +48,7 @@ def setup_logging() -> None:
# We exclude `%(asctime)s` from this format because the Twisted logger adds its own
# timestamp
log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s"
log_format = "%(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"
handler = ToTwistedHandler()
formatter = logging.Formatter(log_format)