
Compare commits


13 Commits

Author SHA1 Message Date
Andrew Morgan 41e22d1e95 newsfile 2025-07-16 10:42:08 +01:00
Andrew Morgan 4f9c523cb5 Set minimum python version to 3.9.12
This matches Twisted's minimum required version.

See twisted/twisted@27674f6
2025-07-16 10:41:58 +01:00
Eric Eastwood fc10a5ee29 Refactor Measure block metrics to be homeserver-scoped (v2) (#18601)
Refactor `Measure` block metrics to be homeserver-scoped (add
`server_name` label to block metrics).

Part of https://github.com/element-hq/synapse/issues/18592
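A minimal sketch of the new call shape, based on the call sites updated in this PR (the surrounding handler class is elided; as in the diffs below, `self.server_name = hs.hostname` and `self.clock = hs.get_clock()` are assumed to be set in `__init__`):

```python
from synapse.util.metrics import Measure

# Sketch only: the block name is now passed as a keyword argument together
# with the homeserver's name, so the emitted block metrics
# (`synapse_util_metrics_block_count` and friends) gain a `server_name` label.
with Measure(
    self.clock, name="notify_interested_services", server_name=self.server_name
):
    ...  # timed work goes here
```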


### Testing strategy

#### See behavior of previous `metrics` listener

1. Add the `metrics` listener to your `homeserver.yaml`:
    ```yaml
    listeners:
      - port: 9323
        type: metrics
        bind_addresses: ['127.0.0.1']
    ```
1. Start the homeserver: `poetry run synapse_homeserver --config-path homeserver.yaml`
1. Fetch `http://localhost:9323/metrics`
1. Observe that the response includes the block metrics (`synapse_util_metrics_block_count`, `synapse_util_metrics_block_in_flight`, etc.)


#### See behavior of the `http` `metrics` resource

1. Add the `metrics` resource to a new or existing `http` listener in your `homeserver.yaml`:
    ```yaml
    listeners:
      - port: 9322
        type: http
        bind_addresses: ['127.0.0.1']
        resources:
          - names: [metrics]
            compress: false
    ```
1. Start the homeserver: `poetry run synapse_homeserver --config-path homeserver.yaml`
1. Fetch `http://localhost:9322/_synapse/metrics` (it's just a `GET` request, so you can even do it in the browser)
1. Observe that the response includes the block metrics (`synapse_util_metrics_block_count`, `synapse_util_metrics_block_in_flight`, etc.); a combined verification sketch for both listeners follows below
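As a combined check of both listeners, here is a small sketch that fetches each endpoint and filters for the block metrics. It simply automates the steps above (it assumes the two listeners configured above are running locally; the ports and paths come from this testing strategy, not from any stable API):

```python
import urllib.request

ENDPOINTS = [
    "http://localhost:9323/metrics",           # dedicated `metrics` listener
    "http://localhost:9322/_synapse/metrics",  # `metrics` resource on an `http` listener
]

for url in ENDPOINTS:
    body = urllib.request.urlopen(url).read().decode("utf-8")
    # Keep only the block metric samples; after this PR each one should
    # carry a `server_name` label alongside its existing labels.
    block_samples = [
        line
        for line in body.splitlines()
        if line.startswith("synapse_util_metrics_block_")
    ]
    print(f"{url}: {len(block_samples)} block metric samples")
```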
2025-07-15 15:55:23 -05:00
Eric Eastwood d72c278a07 Remove allow_no_prev_events option (MSC2716 cleanup) (#18676)
This option has been unused since we backed out the MSC2716 changes in
https://github.com/matrix-org/synapse/pull/15748; removing it is even listed
as a follow-up task in that PR's description.

The `allow_no_prev_events` option was first introduced in
https://github.com/matrix-org/synapse/pull/11243 to support MSC2716 back
in the day.
2025-07-15 15:53:56 -05:00
Johannes Marbach b274d6561c Document that some config options for the user directory are in violation of the Matrix spec (#18548)
Fix #17534

Signed-off-by: Johannes Marbach <n0-0ne+github@mailbox.org>
2025-07-15 13:25:25 -05:00
Andrew Morgan 49cb78376e Advertise support for Matrix v1.12 (#18647) 2025-07-15 15:07:20 +01:00
Eric Eastwood 88f38ea149 Correct version that recaptcha_{private,public}_key_path config options were introduced (#18684)
Introduced in https://github.com/element-hq/synapse/pull/17984

I already see a
[`v1.134.0rc1`](https://github.com/element-hq/synapse/releases/tag/v1.134.0rc1)
tag from 5 days ago, so I assume
https://github.com/element-hq/synapse/pull/17984 will actually ship in
the next release (which will be `v1.135.0`).
2025-07-15 09:05:45 -05:00
Andrew Morgan 5f027adb33 Update URL Preview code to work with lxml 6.0.0 (#18622) 2025-07-15 15:04:29 +01:00
Erik Johnston e6dbbbb315 Merge remote-tracking branch 'origin/master' into develop 2025-07-15 14:55:25 +01:00
dependabot[bot] 78ce4dc26f Bump mypy from 1.13.0 to 1.16.1 (#18653) 2025-07-15 14:42:54 +01:00
Erik Johnston 60be549c0c 1.134.0 2025-07-15 14:22:54 +01:00
reivilibre 97d2738eef Fix CPU and database spinning when retrying sending events to servers whilst at the same time purging those events. (#18499)
Fixes: #18491

Fixes hot-looping on skipped PDUs when there is still no progress to be
made. This could bite if an event was purged after being skipped during
catch-up: the destination queue would then retry forever without ever
making progress.

Signed-off-by: Olivier 'reivilibre' <oliverw@matrix.org>
2025-07-15 12:01:41 +01:00
dependabot[bot] 945e22303c Bump phonenumbers from 9.0.8 to 9.0.9 (#18681) 2025-07-15 11:47:59 +01:00
84 changed files with 665 additions and 816 deletions
+7
@@ -1,3 +1,10 @@
# Synapse 1.134.0 (2025-07-15)
No significant changes since 1.134.0rc1.
# Synapse 1.134.0rc1 (2025-07-09)
### Features
Generated
+4 -10
@@ -65,12 +65,6 @@ dependencies = [
"windows-targets",
]
[[package]]
name = "base64"
version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "base64"
version = "0.22.1"
@@ -380,7 +374,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb"
dependencies = [
"base64 0.22.1",
"base64",
"bytes",
"headers-core",
"http",
@@ -500,7 +494,7 @@ version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb"
dependencies = [
"base64 0.22.1",
"base64",
"bytes",
"futures-channel",
"futures-core",
@@ -1190,7 +1184,7 @@ version = "0.12.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
dependencies = [
"base64 0.22.1",
"base64",
"bytes",
"futures-core",
"futures-util",
@@ -1464,7 +1458,7 @@ name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
"base64 0.21.7",
"base64",
"blake2",
"bytes",
"futures",
+1
@@ -0,0 +1 @@
Fix CPU and database spinning when retrying sending events to servers whilst at the same time purging those events.
+1
@@ -0,0 +1 @@
Document that some config options for the user directory are in violation of the Matrix spec.
+1
@@ -0,0 +1 @@
Refactor `Measure` block metrics to be homeserver-scoped.
+1
@@ -0,0 +1 @@
Raise minimum Python version to `3.9.12`.
+1
@@ -0,0 +1 @@
Update URL Preview code to work with `lxml` 6.0.0+.
+1
@@ -0,0 +1 @@
Advertise support for Matrix v1.12.
+1
@@ -0,0 +1 @@
Fix typing errors with upgraded mypy version.
+1
@@ -0,0 +1 @@
Remove unused `allow_no_prev_events` option when creating an event.
+1
@@ -0,0 +1 @@
Add `recaptcha_private_key_path` and `recaptcha_public_key_path` config options.
-1
@@ -1 +0,0 @@
Add support for MSC4291: Room IDs as hashes of create event.
+6
@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.134.0) stable; urgency=medium
* New Synapse release 1.134.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Jul 2025 14:22:50 +0100
matrix-synapse-py3 (1.134.0~rc1) stable; urgency=medium
* New Synapse release 1.134.0rc1.
@@ -2363,7 +2363,7 @@ recaptcha_public_key: YOUR_PUBLIC_KEY
The file should be a plain text file, containing only the public key. Synapse reads the public key from the given file once at startup.
-_Added in Synapse 1.134.0._
+_Added in Synapse 1.135.0._
Defaults to `null`.
@@ -2387,7 +2387,7 @@ recaptcha_private_key: YOUR_PRIVATE_KEY
The file should be a plain text file, containing only the private key. Synapse reads the private key from the given file once at startup.
-_Added in Synapse 1.134.0._
+_Added in Synapse 1.135.0._
Defaults to `null`.
@@ -3808,7 +3808,11 @@ encryption_enabled_by_default_for_room_type: invite
This setting has the following sub-options:
-* `enabled` (boolean): Defines whether users can search the user directory. If false then empty responses are returned to all queries. Defaults to `true`.
+* `enabled` (boolean): Defines whether users can search the user directory. If `false` then empty responses are returned to all queries.
+*Warning: While the homeserver may determine which subset of users are searched, the Matrix specification requires homeservers to include (at minimum) users visible in public rooms and users sharing a room with the requester. Using `false` improves performance but violates this requirement.*
+Defaults to `true`.
* `search_all_users` (boolean): Defines whether to search all users visible to your homeserver at the time the search is performed. If set to true, will return all users known to the homeserver matching the search query. If false, search results will only contain users visible in public rooms and users sharing a room with the requester.
Generated
+55 -43
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -1425,50 +1425,51 @@ docs = ["sphinx (>=8,<9)", "sphinx-autobuild"]
[[package]]
name = "mypy"
version = "1.13.0"
version = "1.16.1"
description = "Optional static typing for Python"
optional = false
=3.8"">
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
{file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
{file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
{file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
{file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
{file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
{file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
{file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
{file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
{file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
{file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
{file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
{file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
{file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
{file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
{file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
{file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
{file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
{file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
{file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
{file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
{file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
{file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
{file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
{file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
{file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
{file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
{file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
{file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
{file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
{file = "mypy-1.16.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4f0fed1022a63c6fec38f28b7fc77fca47fd490445c69d0a66266c59dd0b88a"},
{file = "mypy-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86042bbf9f5a05ea000d3203cf87aa9d0ccf9a01f73f71c58979eb9249f46d72"},
{file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea7469ee5902c95542bea7ee545f7006508c65c8c54b06dc2c92676ce526f3ea"},
{file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:352025753ef6a83cb9e7f2427319bb7875d1fdda8439d1e23de12ab164179574"},
{file = "mypy-1.16.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ff9fa5b16e4c1364eb89a4d16bcda9987f05d39604e1e6c35378a2987c1aac2d"},
{file = "mypy-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:1256688e284632382f8f3b9e2123df7d279f603c561f099758e66dd6ed4e8bd6"},
{file = "mypy-1.16.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:472e4e4c100062488ec643f6162dd0d5208e33e2f34544e1fc931372e806c0cc"},
{file = "mypy-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea16e2a7d2714277e349e24d19a782a663a34ed60864006e8585db08f8ad1782"},
{file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08e850ea22adc4d8a4014651575567b0318ede51e8e9fe7a68f25391af699507"},
{file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22d76a63a42619bfb90122889b903519149879ddbf2ba4251834727944c8baca"},
{file = "mypy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2c7ce0662b6b9dc8f4ed86eb7a5d505ee3298c04b40ec13b30e572c0e5ae17c4"},
{file = "mypy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:211287e98e05352a2e1d4e8759c5490925a7c784ddc84207f4714822f8cf99b6"},
{file = "mypy-1.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:af4792433f09575d9eeca5c63d7d90ca4aeceda9d8355e136f80f8967639183d"},
{file = "mypy-1.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66df38405fd8466ce3517eda1f6640611a0b8e70895e2a9462d1d4323c5eb4b9"},
{file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44e7acddb3c48bd2713994d098729494117803616e116032af192871aed80b79"},
{file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ab5eca37b50188163fa7c1b73c685ac66c4e9bdee4a85c9adac0e91d8895e15"},
{file = "mypy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb6229b2c9086247e21a83c309754b9058b438704ad2f6807f0d8227f6ebdd"},
{file = "mypy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1f0435cf920e287ff68af3d10a118a73f212deb2ce087619eb4e648116d1fe9b"},
{file = "mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438"},
{file = "mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536"},
{file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f"},
{file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359"},
{file = "mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be"},
{file = "mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee"},
{file = "mypy-1.16.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7fc688329af6a287567f45cc1cefb9db662defeb14625213a5b7da6e692e2069"},
{file = "mypy-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e198ab3f55924c03ead626ff424cad1732d0d391478dfbf7bb97b34602395da"},
{file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09aa4f91ada245f0a45dbc47e548fd94e0dd5a8433e0114917dc3b526912a30c"},
{file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13c7cd5b1cb2909aa318a90fd1b7e31f17c50b242953e7dd58345b2a814f6383"},
{file = "mypy-1.16.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:58e07fb958bc5d752a280da0e890c538f1515b79a65757bbdc54252ba82e0b40"},
{file = "mypy-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:f895078594d918f93337a505f8add9bd654d1a24962b4c6ed9390e12531eb31b"},
{file = "mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37"},
{file = "mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab"},
]
[package.dependencies]
=1.0.0"">
-mypy-extensions = ">=1.0.0"
+mypy_extensions = ">=1.0.0"
pathspec = ">=0.9.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
=4.6.0"">
-typing-extensions = ">=4.6.0"
+typing_extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
@@ -1566,16 +1567,28 @@ files = [
[package.extras]
dev = ["jinja2"]
[[package]]
name = "pathspec"
version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
]
[[package]]
name = "phonenumbers"
version = "9.0.8"
version = "9.0.9"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "phonenumbers-9.0.8-py2.py3-none-any.whl", hash = "sha256:53d357111c0ead0d6408ae443613b18d3a053431ca1ddf7e881457c0969afcf9"},
{file = "phonenumbers-9.0.8.tar.gz", hash = "sha256:16f03f2cf65b5eee99ed25827d810febcab92b5d76f977e425fcd2e4ca6d4865"},
{file = "phonenumbers-9.0.9-py2.py3-none-any.whl", hash = "sha256:13b91aa153f87675902829b38a556bad54824f9c121b89588bbb5fa8550d97ef"},
{file = "phonenumbers-9.0.9.tar.gz", hash = "sha256:c640545019a07e68b0bea57a5fede6eef45c7391165d28935f45615f9a567a5b"},
]
[[package]]
@@ -1713,7 +1726,6 @@ files = [
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
{file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
@@ -3357,5 +3369,5 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = "^3.9.0"
content-hash = "1629969968eaaf20a8f49ee3cc0075b25e5dee7e81503fff66042ece72793103"
python-versions = "^3.9.12"
content-hash = "e0373e8c1b4cffc0d5b48fcf58b1be5c755dd31ce6671f5dd97ae9c50a6dbdb6"
+8 -2
@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.134.0rc1"
version = "1.134.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@@ -159,7 +159,13 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.9.0"
# We aim to support all versions of Python currently supported upstream as per
# our dependency deprecation policy:
# https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy
#
# 3.9.12 is currently the minimum version due to Twisted requiring it:
# https://github.com/twisted/twisted/commit/27674f64d8a553ad5e68913bfb1c936e6fdeb46a
python = "^3.9.12"
# Mandatory Dependencies
# ----------------------
+10 -3
@@ -2703,7 +2703,7 @@ properties:
Synapse reads the public key from the given file once at startup.
-_Added in Synapse 1.134.0._
+_Added in Synapse 1.135.0._
default: null
examples:
- /path/to/key/file
@@ -2726,7 +2726,7 @@ properties:
Synapse reads the private key from the given file once at startup.
-_Added in Synapse 1.134.0._
+_Added in Synapse 1.135.0._
default: null
examples:
- /path/to/key/file
@@ -4719,8 +4719,15 @@ properties:
enabled:
type: boolean
description: >-
-Defines whether users can search the user directory. If false then
+Defines whether users can search the user directory. If `false` then
empty responses are returned to all queries.
+*Warning: While the homeserver may determine which subset of users are
+searched, the Matrix specification requires homeservers to include (at
+minimum) users visible in public rooms and users sharing a room with
+the requester. Using `false` improves performance but violates this
+requirement.*
default: true
search_all_users:
type: boolean
-40
@@ -36,14 +36,12 @@ class EventFormatVersions:
ROOM_V1_V2 = 1 # $id:server event id format: used for room v1 and v2
ROOM_V3 = 2 # MSC1659-style $hash event id format: used for room v3
ROOM_V4_PLUS = 3 # MSC1884-style $hash format: introduced for room v4
ROOM_V11_PLUS_MSC4291 = 4 # MSC4291 room IDs as hashes: introduced for room MSC4291v11
KNOWN_EVENT_FORMAT_VERSIONS = {
EventFormatVersions.ROOM_V1_V2,
EventFormatVersions.ROOM_V3,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.ROOM_V11_PLUS_MSC4291,
}
@@ -111,8 +109,6 @@ class RoomVersion:
msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
# MSC3757: Restricting who can overwrite a state event
msc3757_enabled: bool
# MSC4291: Room IDs as hashes of the create event
msc4291_room_ids_as_hashes: bool
class RoomVersions:
@@ -135,7 +131,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V2 = RoomVersion(
"2",
@@ -156,7 +151,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V3 = RoomVersion(
"3",
@@ -177,7 +171,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V4 = RoomVersion(
"4",
@@ -198,7 +191,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V5 = RoomVersion(
"5",
@@ -219,7 +211,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V6 = RoomVersion(
"6",
@@ -240,7 +231,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V7 = RoomVersion(
"7",
@@ -261,7 +251,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V8 = RoomVersion(
"8",
@@ -282,7 +271,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V9 = RoomVersion(
"9",
@@ -303,7 +291,6 @@ class RoomVersions:
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
V10 = RoomVersion(
"10",
@@ -324,7 +311,6 @@ class RoomVersions:
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
@@ -346,7 +332,6 @@ class RoomVersions:
enforce_int_power_levels=True,
msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
MSC3757v10 = RoomVersion(
# MSC3757 (Restricting who can overwrite a state event) based on room version "10"
@@ -368,7 +353,6 @@ class RoomVersions:
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3757_enabled=True,
msc4291_room_ids_as_hashes=False,
)
V11 = RoomVersion(
"11",
@@ -389,7 +373,6 @@ class RoomVersions:
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=False,
)
MSC3757v11 = RoomVersion(
# MSC3757 (Restricting who can overwrite a state event) based on room version "11"
@@ -411,28 +394,6 @@ class RoomVersions:
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3757_enabled=True,
msc4291_room_ids_as_hashes=False,
)
MSC4291v11 = RoomVersion(
"org.matrix.msc4291.11",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V11_PLUS_MSC4291,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
implicit_room_creator=True, # Used by MSC3820
updated_redaction_rules=True, # Used by MSC3820
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc3389_relation_redactions=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3757_enabled=False,
msc4291_room_ids_as_hashes=True,
)
@@ -452,7 +413,6 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V11,
RoomVersions.MSC3757v10,
RoomVersions.MSC3757v11,
RoomVersions.MSC4291v11,
)
}
+3
@@ -42,6 +42,9 @@ class CasConfig(Config):
self.cas_enabled = cas_config and cas_config.get("enabled", True)
if self.cas_enabled:
if not isinstance(cas_config, dict):
raise ConfigError("Must be a dictionary", ("cas_config",))
self.cas_server_url = cas_config["server_url"]
# TODO Update this to a _synapse URL.
+4 -1
@@ -212,11 +212,14 @@ class KeyConfig(Config):
"Config options that expect an in-line secret as value are disabled",
("form_secret",),
)
if form_secret is not None and not isinstance(form_secret, str):
raise ConfigError("Config option must be a string", ("form_secret",))
form_secret_path = config.get("form_secret_path", None)
if form_secret_path:
if form_secret:
raise ConfigError(CONFLICTING_FORM_SECRET_OPTS_ERROR)
-self.form_secret = read_file(
+self.form_secret: Optional[str] = read_file(
form_secret_path, ("form_secret_path",)
).strip()
else:
+7 -1
@@ -238,10 +238,16 @@ class WorkerConfig(Config):
if worker_replication_secret_path:
if worker_replication_secret:
raise ConfigError(CONFLICTING_WORKER_REPLICATION_SECRET_OPTS_ERROR)
-self.worker_replication_secret = read_file(
+self.worker_replication_secret: Optional[str] = read_file(
worker_replication_secret_path, ("worker_replication_secret_path",)
).strip()
else:
if worker_replication_secret is not None and not isinstance(
worker_replication_secret, str
):
raise ConfigError(
"Config option must be a string", ("worker_replication_secret",)
)
self.worker_replication_secret = worker_replication_secret
self.worker_name = config.get("worker_name", self.worker_app)
-3
@@ -101,9 +101,6 @@ def compute_content_hash(
event_dict.pop("outlier", None)
event_dict.pop("destinations", None)
# N.B. no need to pop the room_id from create events in MSC4291 rooms
# as they shouldn't have one.
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
+11 -27
@@ -261,8 +261,7 @@ async def check_state_independent_auth_rules(
f"Event {event.event_id} has unexpected auth_event for {k}: {auth_event_id}",
)
-# 2.3 ... If there are entries which were themselves rejected under the checks performed on receipt
-# of a PDU, reject.
+# We also need to check that the auth event itself is not rejected.
if auth_event.rejected_reason:
raise AuthError(
403,
@@ -272,7 +271,7 @@ async def check_state_independent_auth_rules(
auth_dict[k] = auth_event_id
-# 2.4. If event does not have a m.room.create in its auth_events, reject.
+# 3. If event does not have a m.room.create in its auth_events, reject.
creation_event = auth_dict.get((EventTypes.Create, ""), None)
if not creation_event:
raise AuthError(403, "No create event in auth events")
@@ -312,14 +311,13 @@ def check_state_dependent_auth_rules(
# Later code relies on there being a create event e.g _can_federate, _is_membership_change_allowed
# so produce a more intelligible error if we don't have one.
-create_event = auth_dict.get(CREATE_KEY)
-if create_event is None:
+if auth_dict.get(CREATE_KEY) is None:
raise AuthError(
403, f"Event {event.event_id} is missing a create event in auth_events."
)
# additional check for m.federate
-creating_domain = get_domain_from_id(create_event.sender)
+creating_domain = get_domain_from_id(event.room_id)
originating_domain = get_domain_from_id(event.sender)
if creating_domain != originating_domain:
if not _can_federate(event, auth_dict):
@@ -472,20 +470,12 @@ def _check_create(event: "EventBase") -> None:
if event.prev_event_ids():
raise AuthError(403, "Create event has prev events")
if event.room_version.msc4291_room_ids_as_hashes:
# 1.2 If the create event has a room_id, reject
if "room_id" in event:
raise AuthError(403, "Create event has a room_id")
else:
# 1.2 If the domain of the room_id does not match the domain of the sender,
# reject.
if not event.room_version.msc4291_room_ids_as_hashes:
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
if room_id_domain != sender_domain:
raise AuthError(
403, "Creation event's room_id domain does not match sender's"
)
# 1.2 If the domain of the room_id does not match the domain of the sender,
# reject.
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
if room_id_domain != sender_domain:
raise AuthError(403, "Creation event's room_id domain does not match sender's")
# 1.3 If content.room_version is present and is not a recognised version, reject
room_version_prop = event.content.get("room_version", "1")
@@ -543,13 +533,7 @@ def _is_membership_change_allowed(
target_user_id = event.state_key
-# We need the create event in order to check if we can federate or not.
-# If it's missing, yell loudly. Previously we only did this inside the
-# _can_federate check.
-create_event = auth_events.get((EventTypes.Create, ""))
-if not create_event:
-raise AuthError(403, "Create event missing from auth_events")
-creating_domain = get_domain_from_id(create_event.sender)
+creating_domain = get_domain_from_id(event.room_id)
target_domain = get_domain_from_id(target_user_id)
if creating_domain != target_domain:
if not _can_federate(event, auth_events):
+2 -79
@@ -44,10 +44,7 @@ from unpaddedbase64 import encode_base64
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.synapse_rust.events import EventInternalMetadata
from synapse.types import (
JsonDict,
StrCollection,
)
from synapse.types import JsonDict, StrCollection
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
@@ -212,6 +209,7 @@ class EventBase(metaclass=abc.ABCMeta):
content: DictProperty[JsonDict] = DictProperty("content")
hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
room_id: DictProperty[str] = DictProperty("room_id")
sender: DictProperty[str] = DictProperty("sender")
# TODO state_key should be Optional[str]. This is generally asserted in Synapse
# by calling is_state() first (which ensures it is not None), but it is hard (not possible?)
@@ -226,10 +224,6 @@ class EventBase(metaclass=abc.ABCMeta):
def event_id(self) -> str:
raise NotImplementedError()
@property
def room_id(self) -> str:
raise NotImplementedError()
@property
def membership(self) -> str:
return self.content["membership"]
@@ -392,10 +386,6 @@ class FrozenEvent(EventBase):
def event_id(self) -> str:
return self._event_id
@property
def room_id(self) -> str:
return self._dict["room_id"]
class FrozenEventV2(EventBase):
format_version = EventFormatVersions.ROOM_V3 # All events of this type are V2
@@ -453,10 +443,6 @@ class FrozenEventV2(EventBase):
self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1])
return self._event_id
@property
def room_id(self) -> str:
return self._dict["room_id"]
def prev_event_ids(self) -> List[str]:
"""Returns the list of prev event IDs. The order matches the order
specified in the event, though there is no meaning to it.
@@ -495,67 +481,6 @@ class FrozenEventV3(FrozenEventV2):
return self._event_id
class FrozenEventV4(FrozenEventV3):
"""FrozenEventV4 for MSC4291 room IDs are hashes"""
format_version = EventFormatVersions.ROOM_V11_PLUS_MSC4291
"""Override the room_id for m.room.create events"""
def __init__(
self,
event_dict: JsonDict,
room_version: RoomVersion,
internal_metadata_dict: Optional[JsonDict] = None,
rejected_reason: Optional[str] = None,
):
super().__init__(
event_dict=event_dict,
room_version=room_version,
internal_metadata_dict=internal_metadata_dict,
rejected_reason=rejected_reason,
)
self._room_id: Optional[str] = None
@property
def room_id(self) -> str:
# if we have calculated the room ID already, don't do it again.
if self._room_id:
return self._room_id
is_create_event = self.type == EventTypes.Create and self.get_state_key() == ""
# for non-create events: use the supplied value from the JSON, as per FrozenEventV3
if not is_create_event:
self._room_id = self._dict["room_id"]
assert self._room_id is not None
return self._room_id
# for create events: calculate the room ID
from synapse.crypto.event_signing import compute_event_reference_hash
self._room_id = "!" + encode_base64(
compute_event_reference_hash(self)[1], urlsafe=True
)
return self._room_id
def auth_event_ids(self) -> StrCollection:
"""Returns the list of auth event IDs. The order matches the order
specified in the event, though there is no meaning to it.
Returns:
The list of event IDs of this event's auth_events
Includes the creation event ID for convenience of all the codepaths
which expects the auth chain to include the creator ID, even though
it's explicitly not included on the wire. Excludes the create event
for the create event itself.
"""
create_event_id = "$" + self.room_id[1:]
assert create_event_id not in self._dict["auth_events"]
if self.type == EventTypes.Create and self.get_state_key() == "":
return self._dict["auth_events"] # should be []
return self._dict["auth_events"] + [create_event_id]
def _event_type_from_format_version(
format_version: int,
) -> Type[Union[FrozenEvent, FrozenEventV2, FrozenEventV3]]:
@@ -575,8 +500,6 @@ def _event_type_from_format_version(
return FrozenEventV2
elif format_version == EventFormatVersions.ROOM_V4_PLUS:
return FrozenEventV3
elif format_version == EventFormatVersions.ROOM_V11_PLUS_MSC4291:
return FrozenEventV4
else:
raise Exception("No event format %r" % (format_version,))
+3 -30
@@ -82,8 +82,7 @@ class EventBuilder:
room_version: RoomVersion
-# MSC4291 makes the room ID == the create event ID. This means the create event has no room_id.
-room_id: Optional[str]
+room_id: str
type: str
sender: str
@@ -143,14 +142,7 @@ class EventBuilder:
Returns:
The signed and hashed event.
"""
# Create events always have empty auth_events.
if self.type == EventTypes.Create and self.is_state() and self.state_key == "":
auth_event_ids = []
# Calculate auth_events for non-create events
if auth_event_ids is None:
# Every non-create event must have a room ID
assert self.room_id is not None
state_ids = await self._state.compute_state_after_events(
self.room_id,
prev_event_ids,
@@ -232,31 +224,12 @@ class EventBuilder:
"auth_events": auth_events,
"prev_events": prev_events,
"type": self.type,
"room_id": self.room_id,
"sender": self.sender,
"content": self.content,
"unsigned": self.unsigned,
"depth": depth,
}
if self.room_id is not None:
event_dict["room_id"] = self.room_id
if self.room_version.msc4291_room_ids_as_hashes:
# In MSC4291: the create event has no room ID as the create event ID /is/ the room ID.
if (
self.type == EventTypes.Create
and self.is_state()
and self._state_key == ""
):
assert self.room_id is None
else:
# All other events do not reference the create event in auth_events, as the room ID
# /is/ the create event. However, the rest of the code (for consistency between room
# versions) assume that the create event remains part of the auth events. c.f. event
# class which automatically adds the create event when `.auth_event_ids()` is called
assert self.room_id is not None
create_event_id = "$" + self.room_id[1:]
auth_event_ids.remove(create_event_id)
event_dict["auth_events"] = auth_event_ids
if self.is_state():
event_dict["state_key"] = self._state_key
@@ -312,7 +285,7 @@ class EventBuilderFactory:
room_version=room_version,
type=key_values["type"],
state_key=key_values.get("state_key"),
room_id=key_values.get("room_id"),
room_id=key_values["room_id"],
sender=key_values["sender"],
content=key_values.get("content", {}),
unsigned=key_values.get("unsigned", {}),
+1 -4
@@ -176,12 +176,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
if room_version.updated_redaction_rules:
# MSC2176 rules state that create events cannot have their `content` redacted.
new_content = event_dict["content"]
-if not room_version.implicit_room_creator:
+elif not room_version.implicit_room_creator:
# Some room versions give meaning to `creator`
add_fields("creator")
if room_version.msc4291_room_ids_as_hashes:
# room_id is not allowed on the create event as it's derived from the event ID
allowed_keys.remove("room_id")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
+1 -18
@@ -183,18 +183,8 @@ class EventValidator:
fields an event would have
"""
create_event_as_room_id = (
event.room_version.msc4291_room_ids_as_hashes
and event.type == EventTypes.Create
and hasattr(event, "state_key")
and event.state_key == ""
)
strings = ["room_id", "sender", "type"]
if create_event_as_room_id:
strings.remove("room_id")
if hasattr(event, "state_key"):
strings.append("state_key")
@@ -202,14 +192,7 @@ class EventValidator:
if not isinstance(getattr(event, s), str):
raise SynapseError(400, "Not '%s' a string type" % (s,))
if not create_event_as_room_id:
assert event.room_id is not None
RoomID.from_string(event.room_id)
if event.room_version.msc4291_room_ids_as_hashes and not RoomID.is_valid(
event.room_id
):
raise SynapseError(400, f"Invalid room ID '{event.room_id}'")
RoomID.from_string(event.room_id)
UserID.from_string(event.sender)
if event.type == EventTypes.Message:
-15
@@ -342,21 +342,6 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB
if room_version.strict_canonicaljson:
validate_canonicaljson(pdu_json)
# enforce that MSC4291 auth events don't include the create event.
# N.B. if they DO include a spurious create event, it'll fail auth checks elsewhere, so we don't
# need to do expensive DB lookups to find which event ID is the create event here.
if room_version.msc4291_room_ids_as_hashes:
room_id = pdu_json.get("room_id")
if room_id:
create_event_id = "$" + room_id[1:]
auth_events = pdu_json.get("auth_events")
if auth_events:
if create_event_id in auth_events:
raise SynapseError(
400,
"auth_events must not contain the create event",
Codes.BAD_JSON,
)
event = make_event_from_dict(pdu_json, room_version)
return event
+3 -1
@@ -156,7 +156,9 @@ class FederationRemoteSendQueue(AbstractFederationSender):
def _clear_queue_before_pos(self, position_to_delete: int) -> None:
"""Clear all the queues from before a given position"""
with Measure(self.clock, "send_queue._clear"):
with Measure(
self.clock, name="send_queue._clear", server_name=self.server_name
):
# Delete things out of presence maps
keys = self.presence_destinations.keys()
i = self.presence_destinations.bisect_left(position_to_delete)
+5 -1
@@ -657,7 +657,11 @@ class FederationSender(AbstractFederationSender):
logger.debug(
"Handling %i events in room %s", len(events), events[0].room_id
)
with Measure(self.clock, "handle_room_events"):
with Measure(
self.clock,
name="handle_room_events",
server_name=self.server_name,
):
for event in events:
await handle_event(event)
@@ -129,6 +129,8 @@ class PerDestinationQueue:
# The stream_ordering of the most recent PDU that was discarded due to
# being in catch-up mode.
# Can be set to zero if no PDU has been discarded since the last time
# we queried for new PDUs during catch-up.
self._catchup_last_skipped: int = 0
# Cache of the last successfully-transmitted stream ordering for this
@@ -462,8 +464,18 @@ class PerDestinationQueue:
# of a race condition, so we check that no new events have been
# skipped due to us being in catch-up mode
-if self._catchup_last_skipped > last_successful_stream_ordering:
+if (
+self._catchup_last_skipped != 0
+and self._catchup_last_skipped > last_successful_stream_ordering
+):
# another event has been skipped because we were in catch-up mode
+# As an exception to this case: we can hit this branch if the
+# room has been purged whilst we have been looping.
+# In that case we avoid hot-looping by resetting the 'catch-up skipped
+# PDU' flag.
+# Then if there is still no progress to be made at the next iteration,
+# we can exit catch-up mode.
+self._catchup_last_skipped = 0
continue
# we are done catching up!
@@ -58,7 +58,7 @@ class TransactionManager:
"""
def __init__(self, hs: "synapse.server.HomeServer"):
-self._server_name = hs.hostname
+self.server_name = hs.hostname # nb must be called this for @measure_func
self.clock = hs.get_clock() # nb must be called this for @measure_func
self._store = hs.get_datastores().main
self._transaction_actions = TransactionActions(self._store)
@@ -116,7 +116,7 @@ class TransactionManager:
transaction = Transaction(
origin_server_ts=int(self.clock.time_msec()),
transaction_id=txn_id,
-origin=self._server_name,
+origin=self.server_name,
destination=destination,
pdus=[p.get_pdu_json() for p in pdus],
edus=[edu.get_dict() for edu in edus],
+9 -2
@@ -73,6 +73,7 @@ events_processed_counter = Counter("synapse_handlers_appservice_events_processed
class ApplicationServicesHandler:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.store = hs.get_datastores().main
self.is_mine_id = hs.is_mine_id
self.appservice_api = hs.get_application_service_api()
@@ -120,7 +121,9 @@ class ApplicationServicesHandler:
@wrap_as_background_process("notify_interested_services")
async def _notify_interested_services(self, max_token: RoomStreamToken) -> None:
with Measure(self.clock, "notify_interested_services"):
with Measure(
self.clock, name="notify_interested_services", server_name=self.server_name
):
self.is_processing = True
try:
upper_bound = -1
@@ -329,7 +332,11 @@ class ApplicationServicesHandler:
users: Collection[Union[str, UserID]],
) -> None:
logger.debug("Checking interested services for %s", stream_key)
with Measure(self.clock, "notify_interested_services_ephemeral"):
with Measure(
self.clock,
name="notify_interested_services_ephemeral",
server_name=self.server_name,
):
for service in services:
if stream_key == StreamKeyType.TYPING:
# Note that we don't persist the token (via set_appservice_stream_type_pos)
+1
@@ -174,6 +174,7 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]:
# Accept both "phone" and "number" as valid keys in m.id.phone
phone_number = identifier.get("phone", identifier["number"])
assert isinstance(phone_number, str)
# Convert user-provided phone number to a consistent representation
msisdn = phone_number_to_msisdn(identifier["country"], phone_number)
+2 -1
@@ -378,7 +378,8 @@ class CasHandler:
# Arbitrarily use the first attribute found.
display_name = cas_response.attributes.get(
-self._cas_displayname_attribute, [None]
+self._cas_displayname_attribute,  # type: ignore[arg-type]
+[None],
)[0]
return UserAttributes(localpart=localpart, display_name=display_name)
+4 -1
@@ -54,6 +54,7 @@ logger = logging.getLogger(__name__)
class DelayedEventsHandler:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self._store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._config = hs.config
@@ -159,7 +160,9 @@ class DelayedEventsHandler:
# Loop round handling deltas until we're up to date
while True:
with Measure(self._clock, "delayed_events_delta"):
with Measure(
self._clock, name="delayed_events_delta", server_name=self.server_name
):
room_max_stream_ordering = self._store.get_room_max_stream_ordering()
if self._event_pos == room_max_stream_ordering:
return
+4 -1
@@ -526,6 +526,8 @@ class DeviceHandler(DeviceWorkerHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname # nb must be called this for @measure_func
self.clock = hs.get_clock() # nb must be called this for @measure_func
self.federation_sender = hs.get_federation_sender()
self._account_data_handler = hs.get_account_data_handler()
self._storage_controllers = hs.get_storage_controllers()
@@ -1215,7 +1217,8 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
-self.clock = hs.get_clock()
+self.server_name = hs.hostname # nb must be called this for @measure_func
+self.clock = hs.get_clock() # nb must be called this for @measure_func
self.device_handler = device_handler
self._notifier = hs.get_notifier()
+11 -52
@@ -476,16 +476,16 @@ _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000
class EventCreationHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.validator = EventValidator()
self.event_builder_factory = hs.get_event_builder_factory()
self.server_name = hs.hostname # nb must be called this for @measure_func
self.clock = hs.get_clock() # nb must be called this for @measure_func
self.auth_blocking = hs.get_auth_blocking()
self._event_auth_handler = hs.get_event_auth_handler()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.profile_handler = hs.get_profile_handler()
self.event_builder_factory = hs.get_event_builder_factory()
self.server_name = hs.hostname
self.notifier = hs.get_notifier()
self.config = hs.config
self.require_membership_for_aliases = (
@@ -568,7 +568,6 @@ class EventCreationHandler:
requester: Requester,
event_dict: dict,
txn_id: Optional[str] = None,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
@@ -594,10 +593,6 @@ class EventCreationHandler:
requester
event_dict: An entire event
txn_id
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -678,10 +673,7 @@ class EventCreationHandler:
Codes.USER_ACCOUNT_SUSPENDED,
)
-is_create_event = (
-event_dict["type"] == EventTypes.Create and event_dict["state_key"] == ""
-)
-if is_create_event:
+if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
room_version_id = event_dict["content"]["room_version"]
maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id)
if not maybe_room_version_obj:
@@ -720,7 +712,6 @@ class EventCreationHandler:
event, unpersisted_context = await self.create_new_client_event(
builder=builder,
requester=requester,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
state_event_ids=state_event_ids,
@@ -788,7 +779,6 @@ class EventCreationHandler:
"""
# the only thing the user can do is join the server notices room.
if builder.type == EventTypes.Member:
assert builder.room_id is not None
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
return await self.store.is_server_notice_room(builder.room_id)
@@ -949,7 +939,6 @@ class EventCreationHandler:
self,
requester: Requester,
event_dict: dict,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
ratelimit: bool = True,
@@ -966,10 +955,6 @@ class EventCreationHandler:
Args:
requester: The requester sending the event.
event_dict: An entire event.
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
prev_event_ids:
The event IDs to use as the prev events.
Should normally be left as None to automatically request them
@@ -1055,7 +1040,6 @@ class EventCreationHandler:
return await self._create_and_send_nonmember_event_locked(
requester=requester,
event_dict=event_dict,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
ratelimit=ratelimit,
@@ -1069,7 +1053,6 @@ class EventCreationHandler:
self,
requester: Requester,
event_dict: dict,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
ratelimit: bool = True,
@@ -1101,7 +1084,6 @@ class EventCreationHandler:
requester,
event_dict,
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
outlier=outlier,
@@ -1184,7 +1166,6 @@ class EventCreationHandler:
self,
builder: EventBuilder,
requester: Optional[Requester] = None,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
@@ -1204,10 +1185,6 @@ class EventCreationHandler:
Args:
builder:
requester:
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -1245,7 +1222,6 @@ class EventCreationHandler:
if state_event_ids is not None:
# Do a quick check to make sure that prev_event_ids is present to
# make the type-checking around `builder.build` happy.
# prev_event_ids could be an empty array though.
assert prev_event_ids is not None
temp_event = await builder.build(
@@ -1271,32 +1247,16 @@ class EventCreationHandler:
% (len(prev_event_ids),)
)
else:
-if builder.type == EventTypes.Create:
-prev_event_ids = []
-else:
-assert builder.room_id is not None
-prev_event_ids = await self.store.get_prev_events_for_room(
-builder.room_id
-)
+prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
# We now ought to have some `prev_events` (unless it's a create event).
#
# Do a quick sanity check here, rather than waiting until we've created the
# event and then try to auth it (which fails with a somewhat confusing "No
# create event in auth events")
-if allow_no_prev_events:
-# We allow events with no `prev_events` but it better have some `auth_events`
-assert (
-builder.type == EventTypes.Create
-# Allow an event to have empty list of prev_event_ids
-# only if it has auth_event_ids.
-or auth_event_ids
-), (
-"Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
-)
-else:
-# we now ought to have some prev_events (unless it's a create event).
-assert builder.type == EventTypes.Create or prev_event_ids, (
-"Attempting to create a non-m.room.create event with no prev_events"
-)
+assert builder.type == EventTypes.Create or len(prev_event_ids) > 0, (
+"Attempting to create an event with no prev_events"
+)
if for_batch:
assert prev_event_ids is not None
@@ -2163,7 +2123,6 @@ class EventCreationHandler:
original_event.room_version, third_party_result
)
self.validator.validate_builder(builder)
assert builder.room_id is not None
except SynapseError as e:
raise Exception(
"Third party rules module created an invalid event: " + e.msg,
+12 -4
@@ -747,6 +747,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
class PresenceHandler(BasePresenceHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname
self.wheel_timer: WheelTimer[str] = WheelTimer()
self.notifier = hs.get_notifier()
@@ -941,7 +942,9 @@ class PresenceHandler(BasePresenceHandler):
now = self.clock.time_msec()
with Measure(self.clock, "presence_update_states"):
with Measure(
self.clock, name="presence_update_states", server_name=self.server_name
):
# NOTE: We purposefully don't await between now and when we've
# calculated what we want to do with the new states, to avoid races.
@@ -1405,7 +1408,7 @@ class PresenceHandler(BasePresenceHandler):
# Based on the state of each user's device calculate the new presence state.
presence = _combine_device_states(devices.values())
new_fields = {"state": presence}
new_fields: JsonDict = {"state": presence}
if presence == PresenceState.ONLINE or presence == PresenceState.BUSY:
new_fields["last_active_ts"] = now
@@ -1497,7 +1500,9 @@ class PresenceHandler(BasePresenceHandler):
async def _unsafe_process(self) -> None:
# Loop round handling deltas until we're up to date
while True:
with Measure(self.clock, "presence_delta"):
with Measure(
self.clock, name="presence_delta", server_name=self.server_name
):
room_max_stream_ordering = self.store.get_room_max_stream_ordering()
if self._event_pos == room_max_stream_ordering:
return
@@ -1759,6 +1764,7 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
# Same with get_presence_router:
#
# AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler
self.server_name = hs.hostname
self.get_presence_handler = hs.get_presence_handler
self.get_presence_router = hs.get_presence_router
self.clock = hs.get_clock()
@@ -1792,7 +1798,9 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
user_id = user.to_string()
stream_change_cache = self.store.presence_stream_cache
with Measure(self.clock, "presence.get_new_events"):
with Measure(
self.clock, name="presence.get_new_events", server_name=self.server_name
):
if from_key is not None:
from_key = int(from_key)
+1 -1
@@ -557,7 +557,7 @@ class RegistrationHandler:
if join_rules_event:
join_rule = join_rules_event.content.get("join_rule", None)
requires_invite = (
-join_rule and join_rule != JoinRules.PUBLIC
+join_rule is not None and join_rule != JoinRules.PUBLIC
)
# Send the invite, if necessary.
+45 -158
@@ -81,7 +81,6 @@ from synapse.types import (
Requester,
RoomAlias,
RoomID,
RoomIdWithDomain,
RoomStreamToken,
StateMap,
StrCollection,
@@ -221,28 +220,8 @@ class RoomCreationHandler:
old_room = await self.store.get_room(old_room_id)
if old_room is None:
raise NotFoundError("Unknown room id %s" % (old_room_id,))
old_room_is_public, _ = old_room
creation_event_with_context = None
if new_version.msc4291_room_ids_as_hashes:
old_room_create_event = await self.store.get_create_event_for_room(
old_room_id
)
creation_content = self._calculate_upgraded_room_creation_content(
old_room_create_event,
tombstone_event_id=None,
new_room_version=new_version,
)
creation_event_with_context = await self._generate_create_event_for_room_id(
requester,
creation_content,
old_room_is_public,
new_version,
)
(create_event, _) = creation_event_with_context
new_room_id = create_event.room_id
else:
new_room_id = self._generate_room_id()
new_room_id = self._generate_room_id()
# Try several times, it could fail with PartialStateConflictError
# in _upgrade_room, cf comment in except block.
@@ -291,7 +270,6 @@ class RoomCreationHandler:
new_version,
tombstone_event,
tombstone_context,
creation_event_with_context,
)
return ret
@@ -315,9 +293,6 @@ class RoomCreationHandler:
new_version: RoomVersion,
tombstone_event: EventBase,
tombstone_context: synapse.events.snapshot.EventContext,
creation_event_with_context: Optional[
Tuple[EventBase, synapse.events.snapshot.EventContext]
] = None,
) -> str:
"""
Args:
@@ -329,7 +304,7 @@ class RoomCreationHandler:
new_version: the version to upgrade the room to
tombstone_event: the tombstone event to send to the old room
tombstone_context: the context for the tombstone event
creation_event_with_context: The new room's create event, for room IDs as create event IDs.
Raises:
ShadowBanError if the requester is shadow-banned.
"""
@@ -338,16 +313,14 @@ class RoomCreationHandler:
logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
# We've already stored the room if we have the create event
if not creation_event_with_context:
# create the new room. may raise a `StoreError` in the exceedingly unlikely
# event of a room ID collision.
await self.store.store_room(
room_id=new_room_id,
room_creator_user_id=user_id,
is_public=old_room[0],
room_version=new_version,
)
# create the new room. may raise a `StoreError` in the exceedingly unlikely
# event of a room ID collision.
await self.store.store_room(
room_id=new_room_id,
room_creator_user_id=user_id,
is_public=old_room[0],
room_version=new_version,
)
await self.clone_existing_room(
requester,
@@ -355,7 +328,6 @@ class RoomCreationHandler:
new_room_id=new_room_id,
new_room_version=new_version,
tombstone_event_id=tombstone_event.event_id,
creation_event_with_context=creation_event_with_context,
)
# now send the tombstone
@@ -462,6 +434,7 @@ class RoomCreationHandler:
)
except AuthError as e:
logger.warning("Unable to update PLs in old room: %s", e)
await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
@@ -476,30 +449,6 @@ class RoomCreationHandler:
ratelimit=False,
)
def _calculate_upgraded_room_creation_content(
self,
old_room_create_event: EventBase,
tombstone_event_id: Optional[str],
new_room_version: RoomVersion,
) -> JsonDict:
creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {
"room_id": old_room_create_event.room_id,
},
}
if tombstone_event_id is not None:
creation_content["predecessor"]["event_id"] = tombstone_event_id
# Check if old room was non-federatable
if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
# If so, mark the new room as non-federatable as well
creation_content[EventContentFields.FEDERATE] = False
# Copy the room type as per MSC3818.
room_type = old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
if room_type is not None:
creation_content[EventContentFields.ROOM_TYPE] = room_type
return creation_content
async def clone_existing_room(
self,
requester: Requester,
@@ -507,9 +456,6 @@ class RoomCreationHandler:
new_room_id: str,
new_room_version: RoomVersion,
tombstone_event_id: str,
creation_event_with_context: Optional[
Tuple[EventBase, synapse.events.snapshot.EventContext]
] = None,
) -> None:
"""Populate a new room based on an old room
@@ -520,23 +466,24 @@ class RoomCreationHandler:
created with _generate_room_id())
new_room_version: the new room version to use
tombstone_event_id: the ID of the tombstone event in the old room.
creation_event_with_context: The create event of the new room, if the new room supports
room ID as create event ID hash.
"""
user_id = requester.user.to_string()
creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
}
# Check if old room was non-federatable
# Get old room's create event
old_room_create_event = await self.store.get_create_event_for_room(old_room_id)
if creation_event_with_context:
create_event, _ = creation_event_with_context
creation_content = create_event.content
else:
creation_content = self._calculate_upgraded_room_creation_content(
old_room_create_event,
tombstone_event_id,
new_room_version,
)
# Check if the create event specified a non-federatable room
if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
# If so, mark the new room as non-federatable as well
creation_content[EventContentFields.FEDERATE] = False
initial_state = {}
# Replicate relevant room events
@@ -552,8 +499,11 @@ class RoomCreationHandler:
(EventTypes.PowerLevels, ""),
]
# Copy the room type as per MSC3818.
room_type = old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
if room_type is not None:
creation_content[EventContentFields.ROOM_TYPE] = room_type
# If the old room was a space, copy over the rooms in the space.
if room_type == RoomTypes.SPACE:
types_to_copy.append((EventTypes.SpaceChild, None))
@@ -653,7 +603,6 @@ class RoomCreationHandler:
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
creation_event_with_context=creation_event_with_context,
)
# Transfer membership events
@@ -953,26 +902,11 @@ class RoomCreationHandler:
self._validate_room_config(config, visibility)
creation_content = config.get("creation_content", {})
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version.identifier
creation_event_with_context = None
if room_version.msc4291_room_ids_as_hashes:
creation_event_with_context = await self._generate_create_event_for_room_id(
requester,
creation_content,
is_public,
room_version,
)
(create_event, _) = creation_event_with_context
room_id = create_event.room_id
else:
room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)
room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)
# Check whether this visibility value is blocked by a third party module
allowed_by_third_party_rules = await (
@@ -1009,6 +943,11 @@ class RoomCreationHandler:
for val in raw_initial_state:
initial_state[(val["type"], val.get("state_key", ""))] = val["content"]
creation_content = config.get("creation_content", {})
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version.identifier
(
last_stream_id,
last_sent_event_id,
@@ -1025,7 +964,6 @@ class RoomCreationHandler:
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
ignore_forced_encryption=ignore_forced_encryption,
creation_event_with_context=creation_event_with_context,
)
# we avoid dropping the lock between invites, as otherwise joins can
@@ -1091,38 +1029,6 @@ class RoomCreationHandler:
return room_id, room_alias, last_stream_id
async def _generate_create_event_for_room_id(
self,
creator: Requester,
creation_content: JsonDict,
is_public: bool,
room_version: RoomVersion,
) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
(
creation_event,
new_unpersisted_context,
) = await self.event_creation_handler.create_event(
creator,
{
"content": creation_content,
"sender": creator.user.to_string(),
"type": EventTypes.Create,
"state_key": "",
},
prev_event_ids=[],
depth=1,
state_map={},
for_batch=False,
)
await self.store.store_room(
room_id=creation_event.room_id,
room_creator_user_id=creator.user.to_string(),
is_public=is_public,
room_version=room_version,
)
creation_context = await new_unpersisted_context.persist(creation_event)
return (creation_event, creation_context)
async def _send_events_for_new_room(
self,
creator: Requester,
@@ -1136,9 +1042,6 @@ class RoomCreationHandler:
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
ignore_forced_encryption: bool = False,
creation_event_with_context: Optional[
Tuple[EventBase, synapse.events.snapshot.EventContext]
] = None,
) -> Tuple[int, str, int]:
"""Sends the initial events into a new room. Sends the room creation, membership,
and power level events into the room sequentially, then creates and batches up the
@@ -1175,10 +1078,7 @@ class RoomCreationHandler:
user in this room.
ignore_forced_encryption:
Ignore encryption forced by `encryption_enabled_by_default_for_room_type` setting.
creation_event_with_context:
Set in MSC4291 rooms where the create event determines the room ID. If provided,
does not create an additional create event but instead appends the remaining new
events onto the provided create event.
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
@@ -1243,26 +1143,13 @@ class RoomCreationHandler:
preset_config, config = self._room_preset_config(room_config)
if creation_event_with_context is None:
# MSC2175 removes the creator field from the create event.
if not room_version.implicit_room_creator:
creation_content["creator"] = creator_id
creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
)
creation_context = await unpersisted_creation_context.persist(
creation_event
)
else:
(creation_event, creation_context) = creation_event_with_context
# we had to do the above already in order to have a room ID, so just update
# local vars and continue.
depth = 2
prev_event = [creation_event.event_id]
state_map[(creation_event.type, creation_event.state_key)] = (
creation_event.event_id
)
# MSC2175 removes the creator field from the create event.
if not room_version.implicit_room_creator:
creation_content["creator"] = creator_id
creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
)
creation_context = await unpersisted_creation_context.persist(creation_event)
logger.debug("Sending %s in new room", EventTypes.Member)
ev = await self.event_creation_handler.handle_new_client_event(
requester=creator,
@@ -1522,7 +1409,7 @@ class RoomCreationHandler:
A random room ID of the form "!opaque_id:domain".
"""
random_string = stringutils.random_string(18)
return RoomIdWithDomain(random_string, self.hs.hostname).to_string()
return RoomID(random_string, self.hs.hostname).to_string()
async def _generate_and_create_room_id(
self,
+4 -22
@@ -388,11 +388,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
async def _local_membership_update(
self,
*,
requester: Requester,
target: UserID,
room_id: str,
membership: str,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
@@ -414,11 +414,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
desired membership event.
room_id:
membership:
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited simply because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
The full state at a given event. This was previously used particularly
@@ -486,7 +481,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"origin_server_ts": origin_server_ts,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
@@ -583,7 +577,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
@@ -607,10 +600,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited simply because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
The full state at a given event. This was previously used particularly
@@ -680,7 +669,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
@@ -703,7 +691,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
@@ -729,10 +716,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited simply because most
events should have a prev_event and we should only use this in special
cases (previously useful for MSC2716).
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
The full state at a given event. This was previously used particularly
@@ -933,7 +916,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
)
# InviteRule.IGNORE is handled at the sync layer.
# An empty prev_events list is allowed as long as the auth_event_ids are present
if prev_event_ids is not None:
return await self._local_membership_update(
requester=requester,
@@ -942,7 +924,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
@@ -1173,8 +1154,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
elif effective_membership_state == Membership.KNOCK:
if not is_host_in_room:
# we used to add the domain of the room ID to remote_room_hosts.
# This is not safe in MSC4291 rooms which do not have a domain.
# The knock needs to be sent over federation instead
remote_room_hosts.append(get_domain_from_id(room_id))
content["membership"] = Membership.KNOCK
try:
+1 -1
@@ -197,7 +197,7 @@ class SendEmailHandler:
additional_headers: A map of additional headers to include.
"""
try:
from_string = self._from % {"app": app_name}
from_string = self._from % {"app": app_name} # type: ignore[operator]
except (KeyError, TypeError):
from_string = self._from
+1 -1
@@ -818,7 +818,7 @@ class SsoHandler:
server_name = avatar_url_parts[-2]
media_id = avatar_url_parts[-1]
if self._is_mine_server_name(server_name):
media = await self._media_repo.store.get_local_media(media_id) # type: ignore[has-type]
media = await self._media_repo.store.get_local_media(media_id)
if media is not None and upload_name == media.upload_name:
logger.info("skipping saving the user avatar")
return True
+13 -4
@@ -329,6 +329,7 @@ class E2eeSyncResult:
class SyncHandler:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.hs_config = hs.config
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
@@ -710,7 +711,9 @@ class SyncHandler:
sync_config = sync_result_builder.sync_config
with Measure(self.clock, "ephemeral_by_room"):
with Measure(
self.clock, name="ephemeral_by_room", server_name=self.server_name
):
typing_key = since_token.typing_key if since_token else 0
room_ids = sync_result_builder.joined_room_ids
@@ -783,7 +786,9 @@ class SyncHandler:
and current token to send down to clients.
newly_joined_room
"""
with Measure(self.clock, "load_filtered_recents"):
with Measure(
self.clock, name="load_filtered_recents", server_name=self.server_name
):
timeline_limit = sync_config.filter_collection.timeline_limit()
block_all_timeline = (
sync_config.filter_collection.blocks_all_room_timeline()
@@ -1174,7 +1179,9 @@ class SyncHandler:
# updates even if they occurred logically before the previous event.
# TODO(mjark) Check for new redactions in the state events.
with Measure(self.clock, "compute_state_delta"):
with Measure(
self.clock, name="compute_state_delta", server_name=self.server_name
):
# The memberships needed for events in the timeline.
# Only calculated when `lazy_load_members` is on.
members_to_fetch: Optional[Set[str]] = None
@@ -1791,7 +1798,9 @@ class SyncHandler:
# the DB.
return RoomNotifCounts.empty()
with Measure(self.clock, "unread_notifs_for_room_id"):
with Measure(
self.clock, name="unread_notifs_for_room_id", server_name=self.server_name
):
return await self.store.get_unread_event_push_actions_by_room_for_user(
room_id,
sync_config.user.to_string(),
+7 -2
@@ -503,6 +503,7 @@ class TypingWriterHandler(FollowerTypingHandler):
class TypingNotificationEventSource(EventSource[int, JsonMapping]):
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self._main_store = hs.get_datastores().main
self.clock = hs.get_clock()
# We can't call get_typing_handler here because there's a cycle:
@@ -535,7 +536,9 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
appservice may be interested in.
* The latest known room serial.
"""
with Measure(self.clock, "typing.get_new_events_as"):
with Measure(
self.clock, name="typing.get_new_events_as", server_name=self.server_name
):
handler = self.get_typing_handler()
events = []
@@ -571,7 +574,9 @@ class TypingNotificationEventSource(EventSource[int, JsonMapping]):
Find typing notifications for given rooms (> `from_token` and <= `to_token`)
"""
with Measure(self.clock, "typing.get_new_events"):
with Measure(
self.clock, name="typing.get_new_events", server_name=self.server_name
):
from_key = int(from_key)
handler = self.get_typing_handler()
+3 -1
@@ -237,7 +237,9 @@ class UserDirectoryHandler(StateDeltasHandler):
# Loop round handling deltas until we're up to date
while True:
with Measure(self.clock, "user_dir_delta"):
with Measure(
self.clock, name="user_dir_delta", server_name=self.server_name
):
room_max_stream_ordering = self.store.get_room_max_stream_ordering()
if self.pos == room_max_stream_ordering:
return
+20 -13
@@ -33,10 +33,11 @@ from twisted.internet.interfaces import (
IAddress,
IConnector,
IProtocol,
IProtocolFactory,
IReactorCore,
IStreamClientEndpoint,
)
from twisted.internet.protocol import ClientFactory, Protocol, connectionDone
from twisted.internet.protocol import ClientFactory, connectionDone
from twisted.python.failure import Failure
from twisted.web import http
@@ -116,11 +117,7 @@ class HTTPConnectProxyEndpoint:
def __repr__(self) -> str:
return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)
# Mypy encounters a false positive here: it complains that ClientFactory
# is incompatible with IProtocolFactory. But ClientFactory inherits from
# Factory, which implements IProtocolFactory. So I think this is a bug
# in mypy-zope.
def connect(self, protocolFactory: ClientFactory) -> "defer.Deferred[IProtocol]": # type: ignore[override]
def connect(self, protocolFactory: IProtocolFactory) -> "defer.Deferred[IProtocol]":
f = HTTPProxiedClientFactory(
self._host, self._port, protocolFactory, self._proxy_creds
)
@@ -148,7 +145,7 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
self,
dst_host: bytes,
dst_port: int,
wrapped_factory: ClientFactory,
wrapped_factory: IProtocolFactory,
proxy_creds: Optional[ProxyCredentials],
):
self.dst_host = dst_host
@@ -158,7 +155,10 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
self.on_connection: "defer.Deferred[None]" = defer.Deferred()
def startedConnecting(self, connector: IConnector) -> None:
return self.wrapped_factory.startedConnecting(connector)
# We expect the wrapped factory to be a ClientFactory, but the generic
# interfaces only guarantee that it implements IProtocolFactory.
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.startedConnecting(connector)
def buildProtocol(self, addr: IAddress) -> "HTTPConnectProtocol":
wrapped_protocol = self.wrapped_factory.buildProtocol(addr)
@@ -177,13 +177,15 @@ class HTTPProxiedClientFactory(protocol.ClientFactory):
logger.debug("Connection to proxy failed: %s", reason)
if not self.on_connection.called:
self.on_connection.errback(reason)
return self.wrapped_factory.clientConnectionFailed(connector, reason)
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None:
logger.debug("Connection to proxy lost: %s", reason)
if not self.on_connection.called:
self.on_connection.errback(reason)
return self.wrapped_factory.clientConnectionLost(connector, reason)
if isinstance(self.wrapped_factory, ClientFactory):
return self.wrapped_factory.clientConnectionLost(connector, reason)
class HTTPConnectProtocol(protocol.Protocol):
@@ -208,7 +210,7 @@ class HTTPConnectProtocol(protocol.Protocol):
self,
host: bytes,
port: int,
wrapped_protocol: Protocol,
wrapped_protocol: IProtocol,
connected_deferred: defer.Deferred,
proxy_creds: Optional[ProxyCredentials],
):
@@ -223,11 +225,14 @@ class HTTPConnectProtocol(protocol.Protocol):
)
self.http_setup_client.on_connected.addCallback(self.proxyConnected)
# Set once we start connecting to the wrapped protocol
self.wrapped_connection_started = False
def connectionMade(self) -> None:
self.http_setup_client.makeConnection(self.transport)
def connectionLost(self, reason: Failure = connectionDone) -> None:
if self.wrapped_protocol.connected:
if self.wrapped_connection_started:
self.wrapped_protocol.connectionLost(reason)
self.http_setup_client.connectionLost(reason)
@@ -236,6 +241,8 @@ class HTTPConnectProtocol(protocol.Protocol):
self.connected_deferred.errback(reason)
def proxyConnected(self, _: Union[None, "defer.Deferred[None]"]) -> None:
self.wrapped_connection_started = True
assert self.transport is not None
self.wrapped_protocol.makeConnection(self.transport)
self.connected_deferred.callback(self.wrapped_protocol)
@@ -247,7 +254,7 @@ class HTTPConnectProtocol(protocol.Protocol):
def dataReceived(self, data: bytes) -> None:
# if we've set up the HTTP protocol, we can send the data there
if self.wrapped_protocol.connected:
if self.wrapped_connection_started:
return self.wrapped_protocol.dataReceived(data)
# otherwise, we must still be setting up the connection: send the data to the
@@ -92,6 +92,7 @@ class MatrixFederationAgent:
def __init__(
self,
server_name: str,
reactor: ISynapseReactor,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
user_agent: bytes,
@@ -100,6 +101,11 @@ class MatrixFederationAgent:
_srv_resolver: Optional[SrvResolver] = None,
_well_known_resolver: Optional[WellKnownResolver] = None,
):
"""
Args:
server_name: Our homeserver name (used to label metrics) (`hs.hostname`).
"""
# proxy_reactor is not blocklisting reactor
proxy_reactor = reactor
@@ -127,6 +133,7 @@ class MatrixFederationAgent:
if _well_known_resolver is None:
_well_known_resolver = WellKnownResolver(
server_name,
reactor,
agent=BlocklistingAgentWrapper(
ProxyAgent(
+14 -1
@@ -91,12 +91,19 @@ class WellKnownResolver:
def __init__(
self,
server_name: str,
reactor: IReactorTime,
agent: IAgent,
user_agent: bytes,
well_known_cache: Optional[TTLCache[bytes, Optional[bytes]]] = None,
had_well_known_cache: Optional[TTLCache[bytes, bool]] = None,
):
"""
Args:
server_name: Our homeserver name (used to label metrics) (`hs.hostname`).
"""
self.server_name = server_name
self._reactor = reactor
self._clock = Clock(reactor)
@@ -134,7 +141,13 @@ class WellKnownResolver:
# TODO: should we linearise so that we don't end up doing two .well-known
# requests for the same server in parallel?
try:
with Measure(self._clock, "get_well_known"):
with Measure(
self._clock,
name="get_well_known",
# This should be the homeserver where the code is running (used to
# label metrics)
server_name=self.server_name,
):
result: Optional[bytes]
cache_period: float
+6 -1
@@ -417,6 +417,7 @@ class MatrixFederationHttpClient:
if hs.get_instance_name() in outbound_federation_restricted_to:
# Talk to federation directly
federation_agent: IAgent = MatrixFederationAgent(
self.server_name,
self.reactor,
tls_client_options_factory,
user_agent.encode("ascii"),
@@ -697,7 +698,11 @@ class MatrixFederationHttpClient:
outgoing_requests_counter.labels(request.method).inc()
try:
with Measure(self.clock, "outbound_request"):
with Measure(
self.clock,
name="outbound_request",
server_name=self.server_name,
):
# we don't want all the fancy cookie and redirect handling
# that treq.request gives: just use the raw Agent.
+26 -4
@@ -133,7 +133,7 @@ def decode_body(
content_type: The Content-Type header.
Returns:
The parsed HTML body, or None if an error occurred during processed.
The parsed HTML body, or None if an error occurred during processing.
"""
# If there's no body, nothing useful is going to be found.
if not body:
@@ -158,9 +158,31 @@ def decode_body(
# Create an HTML parser.
parser = etree.HTMLParser(recover=True, encoding=encoding)
# Attempt to parse the body. Returns None if the body was successfully
# parsed, but no tree was found.
return etree.fromstring(body, parser)
# Attempt to parse the body. With `lxml` 6.0.0+, this will be an empty HTML
# tree if the body was successfully parsed, but no tree was found. In
# previous `lxml` versions, `etree.fromstring` would return `None` in that
# case.
html_tree = etree.fromstring(body, parser)
# Account for the above referenced case where `html_tree` is an HTML tree
# with an empty body. If so, return None.
if html_tree is not None and html_tree.tag == "html":
# If the tree has only a single <body> element and it's empty, then
# return None.
body_el = html_tree.find("body")
if body_el is not None and len(html_tree) == 1:
# Extract the content of the body tag as text.
body_text = "".join(cast(Iterable[str], body_el.itertext()))
# Strip any undecodable Unicode characters and whitespace.
body_text = body_text.strip("\ufffd").strip()
# If there's no text left, and there were no child tags,
# then we consider the <body> tag empty.
if not body_text and len(body_el) == 0:
return None
return html_tree
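A hedged sketch of the behavioural difference described above, assuming an input from which no HTML tree can be recovered (the exact trigger bytes may vary between lxml releases):

```python
from lxml import etree

parser = etree.HTMLParser(recover=True)

# lxml < 6.0.0: fromstring returns None when no tree could be built.
# lxml >= 6.0.0: fromstring instead returns an empty <html> tree, which is
# why decode_body now has to check for an effectively-empty <body> itself.
tree = etree.fromstring(b"\x00", parser)
print(tree)  # None on older lxml; an <html> Element on lxml 6+
```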
def _get_meta_tags(
+37 -3
@@ -66,6 +66,21 @@ all_gauges: Dict[str, Collector] = {}
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
SERVER_NAME_LABEL = "server_name"
"""
The `server_name` label is used to identify the homeserver that the metrics correspond
to. Because we support multiple instances of Synapse running in the same process and all
metrics are in a single global `REGISTRY`, we need to manually label each metric with the homeserver it belongs to.
In the case of a Synapse homeserver, this should be set to the homeserver name
(`hs.hostname`).
We're purposely not using the `instance` label for this purpose as that should be "The
<host>:<port> part of the target's URL that was scraped.". Also: "In Prometheus
terms, an endpoint you can scrape is called an *instance*, usually corresponding to a
single process." (source: https://prometheus.io/docs/concepts/jobs_instances/)
"""
class _RegistryProxy:
@staticmethod
@@ -192,7 +207,16 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
same key.
Note that `callback` may be called on a separate thread.
Args:
key: A tuple of label values, which must match the order of the
`labels` given to the constructor.
callback: The callback to associate with `key`; invoked when the in-flight metrics are collected.
"""
assert len(key) == len(self.labels), (
f"Expected {len(self.labels)} labels in `key`, got {len(key)}: {key}"
)
with self._lock:
self._registrations.setdefault(key, set()).add(callback)
@@ -201,7 +225,17 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
key: Tuple[str, ...],
callback: Callable[[MetricsEntry], None],
) -> None:
"""Registers that we've exited a block with labels `key`."""
"""
Registers that we've exited a block with labels `key`.
Args:
key: A tuple of label values, which must match the order of the
`labels` given to the constructor.
callback: The callback previously registered for `key`.
"""
assert len(key) == len(self.labels), (
f"Expected {len(self.labels)} labels in `key`, got {len(key)}: {key}"
)
with self._lock:
self._registrations.setdefault(key, set()).discard(callback)
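To make the contract these assertions enforce concrete, a hedged sketch with invented names (the gauge and callback below are examples, not code from this change): the key tuple must supply one value per declared label, in declaration order.

```python
from synapse.metrics import SERVER_NAME_LABEL, InFlightGauge

example_gauge: InFlightGauge = InFlightGauge(
    "synapse_example_in_flight",
    desc="Tracks hypothetical in-flight operations",
    labels=["block_name", SERVER_NAME_LABEL],
    sub_metrics=["real_time_max", "real_time_sum"],
)

def _on_collect(metrics) -> None:
    # Update the mutable sub-metrics entry when the gauge is collected.
    metrics.real_time_max = max(metrics.real_time_max, 0.0)

# Two labels declared above, so the key must be a 2-tuple, in the same order.
example_gauge.register(("my_block", "example.com"), _on_collect)
example_gauge.unregister(("my_block", "example.com"), _on_collect)
```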
@@ -225,7 +259,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
with self._lock:
callbacks = set(self._registrations[key])
in_flight.add_metric(key, len(callbacks))
in_flight.add_metric(labels=key, value=len(callbacks))
metrics = self._metrics_class()
metrics_by_key[key] = metrics
@@ -239,7 +273,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):
"_".join([self.name, name]), "", labels=self.labels
)
for key, metrics in metrics_by_key.items():
gauge.add_metric(key, getattr(metrics, name))
gauge.add_metric(labels=key, value=getattr(metrics, name))
yield gauge
def _register_with_collector(self) -> None:
+1 -1
@@ -33,7 +33,7 @@ from twisted.internet.asyncioreactor import AsyncioSelectorReactor
from synapse.metrics._types import Collector
try:
from selectors import KqueueSelector
from selectors import KqueueSelector # type: ignore[attr-defined]
except ImportError:
class KqueueSelector: # type: ignore[no-redef]
+1 -1
@@ -284,7 +284,7 @@ class ModuleApi:
try:
app_name = self._hs.config.email.email_app_name
self._from_string = self._hs.config.email.email_notif_from % {
self._from_string = self._hs.config.email.email_notif_from % { # type: ignore[operator]
"app": app_name
}
except (KeyError, TypeError):
@@ -31,6 +31,7 @@ IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK = Callable[[str, int], Awaitabl
class MediaRepositoryModuleApiCallbacks:
def __init__(self, hs: "HomeServer") -> None:
self.server_name = hs.hostname
self.clock = hs.get_clock()
self._get_media_config_for_user_callbacks: List[
GET_MEDIA_CONFIG_FOR_USER_CALLBACK
@@ -57,7 +58,11 @@ class MediaRepositoryModuleApiCallbacks:
async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
for callback in self._get_media_config_for_user_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res: Optional[JsonDict] = await delay_cancellation(callback(user_id))
if res:
return res
@@ -68,7 +73,11 @@ class MediaRepositoryModuleApiCallbacks:
self, user_id: str, size: int
) -> bool:
for callback in self._is_user_allowed_to_upload_media_of_size_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res: bool = await delay_cancellation(callback(user_id, size))
if not res:
return res
@@ -43,6 +43,7 @@ GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK = Callable[
class RatelimitModuleApiCallbacks:
def __init__(self, hs: "HomeServer") -> None:
self.server_name = hs.hostname
self.clock = hs.get_clock()
self._get_ratelimit_override_for_user_callbacks: List[
GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK
@@ -64,7 +65,11 @@ class RatelimitModuleApiCallbacks:
self, user_id: str, limiter_name: str
) -> Optional[RatelimitOverride]:
for callback in self._get_ratelimit_override_for_user_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res: Optional[RatelimitOverride] = await delay_cancellation(
callback(user_id, limiter_name)
)
@@ -356,6 +356,7 @@ class SpamCheckerModuleApiCallbacks:
NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM"
def __init__(self, hs: "synapse.server.HomeServer") -> None:
self.server_name = hs.hostname
self.clock = hs.get_clock()
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
@@ -490,7 +491,11 @@ class SpamCheckerModuleApiCallbacks:
generally discouraged as it doesn't support internationalization.
"""
for callback in self._check_event_for_spam_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(callback(event))
if res is False or res == self.NOT_SPAM:
# This spam-checker accepts the event.
@@ -543,7 +548,11 @@ class SpamCheckerModuleApiCallbacks:
True if the event should be silently dropped
"""
for callback in self._should_drop_federated_event_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res: Union[bool, str] = await delay_cancellation(callback(event))
if res:
return res
@@ -565,7 +574,11 @@ class SpamCheckerModuleApiCallbacks:
NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise.
"""
for callback in self._user_may_join_room_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(callback(user_id, room_id, is_invited))
# Normalize return values to `Codes` or `"NOT_SPAM"`.
if res is True or res is self.NOT_SPAM:
@@ -604,7 +617,11 @@ class SpamCheckerModuleApiCallbacks:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._user_may_invite_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(
callback(inviter_userid, invitee_userid, room_id)
)
@@ -643,7 +660,11 @@ class SpamCheckerModuleApiCallbacks:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._federated_user_may_invite_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(callback(event))
# Normalize return values to `Codes` or `"NOT_SPAM"`.
if res is True or res is self.NOT_SPAM:
@@ -686,7 +707,11 @@ class SpamCheckerModuleApiCallbacks:
NOT_SPAM if the operation is permitted, Codes otherwise.
"""
for callback in self._user_may_send_3pid_invite_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(
callback(inviter_userid, medium, address, room_id)
)
@@ -722,7 +747,11 @@ class SpamCheckerModuleApiCallbacks:
room_config: The room creation configuration which is the body of the /createRoom request
"""
for callback in self._user_may_create_room_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
checker_args = inspect.signature(callback)
# Also ensure backwards compatibility with spam checker callbacks
# that don't expect the room_config argument.
@@ -786,7 +815,11 @@ class SpamCheckerModuleApiCallbacks:
content: The content of the state event
"""
for callback in self._user_may_send_state_event_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
# We make a copy of the content to ensure that the spam checker cannot modify it.
res = await delay_cancellation(
callback(user_id, room_id, event_type, state_key, deepcopy(content))
@@ -814,7 +847,11 @@ class SpamCheckerModuleApiCallbacks:
"""
for callback in self._user_may_create_room_alias_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(callback(userid, room_alias))
if res is True or res is self.NOT_SPAM:
continue
@@ -847,7 +884,11 @@ class SpamCheckerModuleApiCallbacks:
room_id: The ID of the room that would be published
"""
for callback in self._user_may_publish_room_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(callback(userid, room_id))
if res is True or res is self.NOT_SPAM:
continue
@@ -889,7 +930,11 @@ class SpamCheckerModuleApiCallbacks:
True if the user is spammy.
"""
for callback in self._check_username_for_spam_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
checker_args = inspect.signature(callback)
# Make a copy of the user profile object to ensure the spam checker cannot
# modify it.
@@ -938,7 +983,11 @@ class SpamCheckerModuleApiCallbacks:
"""
for callback in self._check_registration_for_spam_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
behaviour = await delay_cancellation(
callback(email_threepid, username, request_info, auth_provider_id)
)
@@ -980,7 +1029,11 @@ class SpamCheckerModuleApiCallbacks:
"""
for callback in self._check_media_file_for_spam_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(callback(file_wrapper, file_info))
# Normalize return values to `Codes` or `"NOT_SPAM"`.
if res is False or res is self.NOT_SPAM:
@@ -1027,7 +1080,11 @@ class SpamCheckerModuleApiCallbacks:
"""
for callback in self._check_login_for_spam_callbacks:
with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
with Measure(
self.clock,
name=f"{callback.__module__}.{callback.__qualname__}",
server_name=self.server_name,
):
res = await delay_cancellation(
callback(
user_id,
+2 -1
@@ -129,7 +129,8 @@ class BulkPushRuleEvaluator:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
self.server_name = hs.hostname # nb must be called this for @measure_func
self.clock = hs.get_clock() # nb must be called this for @measure_func
self._event_auth_handler = hs.get_event_auth_handler()
self.should_calculate_push_rules = self.hs.config.push.enable_push
+4 -1
@@ -76,6 +76,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.clock = hs.get_clock()
@@ -122,7 +123,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
async def _handle_request( # type: ignore[override]
self, request: Request, content: JsonDict
) -> Tuple[int, JsonDict]:
with Measure(self.clock, "repl_fed_send_events_parse"):
with Measure(
self.clock, name="repl_fed_send_events_parse", server_name=self.server_name
):
room_id = content["room_id"]
backfilled = content["backfilled"]
+4 -1
@@ -76,6 +76,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
@@ -121,7 +122,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
async def _handle_request( # type: ignore[override]
self, request: Request, content: JsonDict, event_id: str
) -> Tuple[int, JsonDict]:
with Measure(self.clock, "repl_send_event_parse"):
with Measure(
self.clock, name="repl_send_event_parse", server_name=self.server_name
):
event_dict = content["event"]
room_ver = KNOWN_ROOM_VERSIONS[content["room_version"]]
internal_metadata = content["internal_metadata"]
+4 -1
@@ -77,6 +77,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.server_name = hs.hostname
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
@@ -122,7 +123,9 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
async def _handle_request( # type: ignore[override]
self, request: Request, payload: JsonDict
) -> Tuple[int, JsonDict]:
with Measure(self.clock, "repl_send_events_parse"):
with Measure(
self.clock, name="repl_send_events_parse", server_name=self.server_name
):
events_and_context = []
events = payload["events"]
rooms = set()
+6 -1
@@ -75,6 +75,7 @@ class ReplicationDataHandler:
"""
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
self._reactor = hs.get_reactor()
@@ -342,7 +343,11 @@ class ReplicationDataHandler:
waiting_list.add((position, deferred))
# We measure here to get in flight counts and average waiting time.
with Measure(self._clock, "repl.wait_for_stream_position"):
with Measure(
self._clock,
name="repl.wait_for_stream_position",
server_name=self.server_name,
):
logger.info(
"Waiting for repl stream %r to reach %s (%s); currently at: %s",
stream_name,
+6 -1
@@ -78,6 +78,7 @@ class ReplicationStreamer:
"""
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
@@ -155,7 +156,11 @@ class ReplicationStreamer:
while self.pending_updates:
self.pending_updates = False
with Measure(self.clock, "repl.stream.get_updates"):
with Measure(
self.clock,
name="repl.stream.get_updates",
server_name=self.server_name,
):
all_streams = self.streams
if self._replication_torture_level is not None:
+1
@@ -112,6 +112,7 @@ class VersionsRestServlet(RestServlet):
"v1.9",
"v1.10",
"v1.11",
"v1.12",
],
# as per MSC1497:
"unstable_features": {
+9 -3
@@ -189,7 +189,8 @@ class StateHandler:
"""
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.server_name = hs.hostname # nb must be called this for @measure_func
self.clock = hs.get_clock() # nb must be called this for @measure_func
self.store = hs.get_datastores().main
self._state_storage_controller = hs.get_storage_controllers().state
self.hs = hs
@@ -631,6 +632,7 @@ class StateResolutionHandler:
"""
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.resolve_linearizer = Linearizer(name="state_resolve_lock")
@@ -747,7 +749,9 @@ class StateResolutionHandler:
# which will be used as a cache key for future resolutions, but
# not get persisted.
with Measure(self.clock, "state.create_group_ids"):
with Measure(
self.clock, name="state.create_group_ids", server_name=self.server_name
):
cache = _make_state_cache_entry(new_state, state_groups_ids)
self._state_cache[group_names] = cache
@@ -785,7 +789,9 @@ class StateResolutionHandler:
a map from (type, state_key) to event_id.
"""
try:
with Measure(self.clock, "state._resolve_events") as m:
with Measure(
self.clock, name="state._resolve_events", server_name=self.server_name
) as m:
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
if room_version_obj.state_res == StateResolutionVersions.V1:
return await v1.resolve_events_with_store(
+1
@@ -55,6 +55,7 @@ class SQLBaseStore(metaclass=ABCMeta):
hs: "HomeServer",
):
self.hs = hs
self.server_name = hs.hostname
self._clock = hs.get_clock()
self.database_engine = database.engine
self.db_pool = database
+21 -4
@@ -337,6 +337,7 @@ class EventsPersistenceStorageController:
assert stores.persist_events
self.persist_events_store = stores.persist_events
self.server_name = hs.hostname
self._clock = hs.get_clock()
self._instance_name = hs.get_instance_name()
self.is_mine_id = hs.is_mine_id
@@ -616,7 +617,11 @@ class EventsPersistenceStorageController:
state_delta_for_room = None
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
with Measure(
self._clock,
name="_calculate_state_and_extrem",
server_name=self.server_name,
):
# Work out the new "current state" for the room.
# We do this by working out what the new extremities are and then
# calculating the state from that.
@@ -627,7 +632,11 @@ class EventsPersistenceStorageController:
room_id, chunk
)
with Measure(self._clock, "calculate_chain_cover_index_for_events"):
with Measure(
self._clock,
name="calculate_chain_cover_index_for_events",
server_name=self.server_name,
):
# We now calculate chain ID/sequence numbers for any state events we're
# persisting. We ignore out of band memberships as we're not in the room
# and won't have their auth chain (we'll fix it up later if we join the
@@ -719,7 +728,11 @@ class EventsPersistenceStorageController:
break
logger.debug("Calculating state delta for room %s", room_id)
with Measure(self._clock, "persist_events.get_new_state_after_events"):
with Measure(
self._clock,
name="persist_events.get_new_state_after_events",
server_name=self.server_name,
):
res = await self._get_new_state_after_events(
room_id,
ev_ctx_rm,
@@ -746,7 +759,11 @@ class EventsPersistenceStorageController:
# removed keys entirely.
delta = DeltaState([], delta_ids)
elif current_state is not None:
with Measure(self._clock, "persist_events.calculate_state_delta"):
with Measure(
self._clock,
name="persist_events.calculate_state_delta",
server_name=self.server_name,
):
delta = await self._calculate_state_delta(room_id, current_state)
if delta:
+4 -1
@@ -68,6 +68,7 @@ class StateStorageController:
"""
def __init__(self, hs: "HomeServer", stores: "Databases"):
self.server_name = hs.hostname
self._is_mine_id = hs.is_mine_id
self._clock = hs.get_clock()
self.stores = stores
@@ -812,7 +813,9 @@ class StateStorageController:
state_group = object()
assert state_group is not None
with Measure(self._clock, "get_joined_hosts"):
with Measure(
self._clock, name="get_joined_hosts", server_name=self.server_name
):
return await self._get_joined_hosts(
room_id, state_group, state_entry=state_entry
)
@@ -1246,7 +1246,9 @@ class EventsWorkerStore(SQLBaseStore):
to event row. Note that it may well contain additional events that
were not part of this request.
"""
with Measure(self._clock, "_fetch_event_list"):
with Measure(
self._clock, name="_fetch_event_list", server_name=self.server_name
):
try:
events_to_fetch = {
event_id for events, _ in event_list for event_id in events
+5 -1
@@ -983,7 +983,11 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
`_get_user_ids_from_membership_event_ids` for any uncached events.
"""
with Measure(self._clock, "get_joined_user_ids_from_state"):
with Measure(
self._clock,
name="get_joined_user_ids_from_state",
server_name=self.server_name,
):
users_in_room = set()
member_event_ids = [
e_id for key, e_id in state.items() if key[0] == EventTypes.Member
+4 -3
@@ -990,11 +990,12 @@ class StateMapWrapper(Dict[StateKey, str]):
raise Exception("State map was filtered and doesn't include: %s", key)
return super().__getitem__(key)
@overload # type: ignore[override]
def get(self, key: StateKey, default: None = None, /) -> Optional[str]: ...
@overload
def get(self, key: Tuple[str, str]) -> Optional[str]: ...
def get(self, key: StateKey, default: str, /) -> str: ...
@overload
def get(self, key: Tuple[str, str], default: Union[str, _T]) -> Union[str, _T]: ...
def get(self, key: StateKey, default: _T, /) -> Union[str, _T]: ...
def get(
self, key: StateKey, default: Union[str, _T, None] = None
+3 -3
@@ -34,9 +34,9 @@ AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$"
class IsolationLevel(IntEnum):
READ_COMMITTED: int = 1
REPEATABLE_READ: int = 2
SERIALIZABLE: int = 3
READ_COMMITTED = 1
REPEATABLE_READ = 2
SERIALIZABLE = 3
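Presumably this change keeps mypy happy after the 1.13 to 1.16 bump: recent mypy versions reject annotated assignments inside an `Enum` body, treating the annotation as marking a plain attribute rather than a member. A minimal sketch of the rule:

```python
from enum import IntEnum

class Example(IntEnum):
    GOOD = 1  # an unannotated assignment: a proper enum member

    # BAD: int = 2
    # Recent mypy rejects this with an error along the lines of
    # "Enum members must be left unannotated".
```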
class IncorrectDatabaseSetup(RuntimeError):
+2 -68
@@ -353,78 +353,12 @@ class RoomAlias(DomainSpecificString):
@attr.s(slots=True, frozen=True, repr=False)
class RoomIdWithDomain(DomainSpecificString):
"""Structure representing a room ID with a domain suffix."""
class RoomID(DomainSpecificString):
"""Structure representing a room id."""
SIGIL = "!"
# the set of urlsafe base64 characters, no padding.
ROOM_ID_PATTERN_DOMAINLESS = re.compile(r"^[A-Za-z0-9\-_]{43}$")
@attr.define(slots=True, frozen=True, repr=False)
class RoomID:
"""Structure representing a room id without a domain.
There are two forms of room IDs:
- "!localpart:domain" used in most room versions prior to MSC4291.
- "!event_id_base_64" used in room versions post MSC4291.
This class will accept any room ID which meets either of these two criteria.
"""
SIGIL = "!"
id: str
room_id_with_domain: Optional[RoomIdWithDomain]
@classmethod
def is_valid(cls: Type["RoomID"], s: str) -> bool:
if ":" in s:
return RoomIdWithDomain.is_valid(s)
try:
cls.from_string(s)
return True
except Exception:
return False
def get_domain(self) -> Optional[str]:
if not self.room_id_with_domain:
return None
return self.room_id_with_domain.domain
def to_string(self) -> str:
if self.room_id_with_domain:
return self.room_id_with_domain.to_string()
return self.id
__repr__ = to_string
@classmethod
def from_string(cls: Type["RoomID"], s: str) -> "RoomID":
# sigil check
if len(s) < 1 or s[0] != cls.SIGIL:
raise SynapseError(
400,
"Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL),
Codes.INVALID_PARAM,
)
room_id_with_domain: Optional[RoomIdWithDomain] = None
if ":" in s:
room_id_with_domain = RoomIdWithDomain.from_string(s)
else:
# MSC4291 room IDs must be valid urlsafe unpadded base64
val = s[1:]
if not ROOM_ID_PATTERN_DOMAINLESS.match(val):
raise SynapseError(
400,
"Expected %s string to be valid urlsafe unpadded base64 '%s'"
% (cls.__name__, val),
Codes.INVALID_PARAM,
)
return cls(id=s, room_id_with_domain=room_id_with_domain)
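An illustrative round trip through both accepted forms (the identifiers here are invented for the example):

```python
# Pre-MSC4291 form: carries a domain after the colon.
legacy = RoomID.from_string("!abcdefgh:example.com")
assert legacy.get_domain() == "example.com"

# MSC4291 form: 43 characters of urlsafe unpadded base64, no domain.
hashed = RoomID.from_string("!" + "A" * 43)
assert hashed.get_domain() is None
assert hashed.to_string() == "!" + "A" * 43
```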
@attr.s(slots=True, frozen=True, repr=False)
class EventID(DomainSpecificString):
"""Structure representing an event id."""
+1 -1
@@ -97,7 +97,7 @@ _GETADDRINFO_RESULT = List[
SocketKind,
int,
str,
Union[Tuple[str, int], Tuple[str, int, int, int]],
Union[Tuple[str, int], Tuple[str, int, int, int], Tuple[int, bytes]],
]
]
+87 -34
@@ -41,59 +41,100 @@ from synapse.logging.context import (
LoggingContext,
current_context,
)
from synapse.metrics import InFlightGauge
from synapse.metrics import SERVER_NAME_LABEL, InFlightGauge
from synapse.util import Clock
logger = logging.getLogger(__name__)
block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"])
# Metrics tracking how often various blocks of code are entered and how much time is spent in them.
#
block_counter = Counter(
"synapse_util_metrics_block_count",
documentation="The number of times this block has been called.",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""The number of times this block has been called."""
block_timer = Counter("synapse_util_metrics_block_time_seconds", "", ["block_name"])
block_timer = Counter(
"synapse_util_metrics_block_time_seconds",
documentation="The cumulative time spent executing this block across all calls, in seconds.",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""The cumulative time spent executing this block across all calls, in seconds."""
block_ru_utime = Counter(
"synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"]
"synapse_util_metrics_block_ru_utime_seconds",
documentation="Resource usage: user CPU time in seconds used in this block",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""Resource usage: user CPU time in seconds used in this block"""
block_ru_stime = Counter(
"synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"]
"synapse_util_metrics_block_ru_stime_seconds",
documentation="Resource usage: system CPU time in seconds used in this block",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""Resource usage: system CPU time in seconds used in this block"""
block_db_txn_count = Counter(
"synapse_util_metrics_block_db_txn_count", "", ["block_name"]
"synapse_util_metrics_block_db_txn_count",
documentation="Number of database transactions completed in this block",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""Number of database transactions completed in this block"""
# seconds spent waiting for db txns, excluding scheduling time, in this block
block_db_txn_duration = Counter(
"synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"]
"synapse_util_metrics_block_db_txn_duration_seconds",
documentation="Seconds spent waiting for database txns, excluding scheduling time, in this block",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""Seconds spent waiting for database txns, excluding scheduling time, in this block"""
# seconds spent waiting for a db connection, in this block
block_db_sched_duration = Counter(
"synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]
"synapse_util_metrics_block_db_sched_duration_seconds",
documentation="Seconds spent waiting for a db connection, in this block",
labelnames=["block_name", SERVER_NAME_LABEL],
)
"""Seconds spent waiting for a db connection, in this block"""
# This is dynamically created in InFlightGauge.__init__.
class _InFlightMetric(Protocol):
class _BlockInFlightMetric(Protocol):
"""
Sub-metrics reported by the `InFlightGauge` for blocks.
"""
real_time_max: float
"""The longest observed duration of any single execution of this block, in seconds."""
real_time_sum: float
"""The cumulative time spent executing this block across all calls, in seconds."""
# Tracks the number of blocks currently active
in_flight: InFlightGauge[_InFlightMetric] = InFlightGauge(
in_flight: InFlightGauge[_BlockInFlightMetric] = InFlightGauge(
"synapse_util_metrics_block_in_flight",
"",
labels=["block_name"],
desc="Tracks the number of blocks currently active",
labels=["block_name", SERVER_NAME_LABEL],
# Matches the fields in the `_BlockInFlightMetric`
sub_metrics=["real_time_max", "real_time_sum"],
)
"""Tracks the number of blocks currently active"""
P = ParamSpec("P")
R = TypeVar("R")
class HasClock(Protocol):
class HasClockAndServerName(Protocol):
clock: Clock
"""
Used to measure functions
"""
server_name: str
"""
The homeserver name that this Measure is associated with (used to label the metric)
(`hs.hostname`).
"""
def measure_func(
@@ -101,8 +142,9 @@ def measure_func(
) -> Callable[[Callable[P, Awaitable[R]]], Callable[P, Awaitable[R]]]:
"""Decorate an async method with a `Measure` context manager.
The Measure is created using `self.clock`; it should only be used to decorate
methods in classes defining an instance-level `clock` attribute.
The Measure is created using `self.clock` and `self.server_name`; it should only be
used to decorate methods in classes defining instance-level `clock` and
`server_name` attributes.
Usage:
@@ -116,16 +158,21 @@ def measure_func(
with Measure(...):
...
Args:
name: The name of the metric to report (the block name) (used to label the
metric). Defaults to the name of the decorated function.
"""
     def wrapper(
-        func: Callable[Concatenate[HasClock, P], Awaitable[R]],
+        func: Callable[Concatenate[HasClockAndServerName, P], Awaitable[R]],
     ) -> Callable[P, Awaitable[R]]:
         block_name = func.__name__ if name is None else name

         @wraps(func)
-        async def measured_func(self: HasClock, *args: P.args, **kwargs: P.kwargs) -> R:
-            with Measure(self.clock, block_name):
+        async def measured_func(
+            self: HasClockAndServerName, *args: P.args, **kwargs: P.kwargs
+        ) -> R:
+            with Measure(self.clock, name=block_name, server_name=self.server_name):
                 r = await func(self, *args, **kwargs)
             return r
@@ -142,19 +189,24 @@ class Measure:
     __slots__ = [
         "clock",
         "name",
+        "server_name",
         "_logging_context",
         "start",
     ]
-    def __init__(self, clock: Clock, name: str) -> None:
+    def __init__(self, clock: Clock, *, name: str, server_name: str) -> None:
         """
         Args:
             clock: An object with a "time()" method, which returns the current
                 time in seconds.
-            name: The name of the metric to report.
+            name: The name of the metric to report (the block name; used to label
+                the metric).
+            server_name: The homeserver name that this Measure is associated with
+                (used to label the metric; `hs.hostname`).
         """
         self.clock = clock
         self.name = name
+        self.server_name = server_name
         curr_context = current_context()
         if not curr_context:
             logger.warning(
@@ -174,7 +226,7 @@ class Measure:
         self.start = self.clock.time()
         self._logging_context.__enter__()
-        in_flight.register((self.name,), self._update_in_flight)
+        in_flight.register((self.name, self.server_name), self._update_in_flight)

         logger.debug("Entering block %s", self.name)
@@ -194,19 +246,20 @@ class Measure:
         duration = self.clock.time() - self.start
         usage = self.get_resource_usage()

-        in_flight.unregister((self.name,), self._update_in_flight)
+        in_flight.unregister((self.name, self.server_name), self._update_in_flight)
         self._logging_context.__exit__(exc_type, exc_val, exc_tb)

         try:
-            block_counter.labels(self.name).inc()
-            block_timer.labels(self.name).inc(duration)
-            block_ru_utime.labels(self.name).inc(usage.ru_utime)
-            block_ru_stime.labels(self.name).inc(usage.ru_stime)
-            block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
-            block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
-            block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
-        except ValueError:
-            logger.warning("Failed to save metrics! Usage: %s", usage)
+            labels = {"block_name": self.name, SERVER_NAME_LABEL: self.server_name}
+            block_counter.labels(**labels).inc()
+            block_timer.labels(**labels).inc(duration)
+            block_ru_utime.labels(**labels).inc(usage.ru_utime)
+            block_ru_stime.labels(**labels).inc(usage.ru_stime)
+            block_db_txn_count.labels(**labels).inc(usage.db_txn_count)
+            block_db_txn_duration.labels(**labels).inc(usage.db_txn_duration_sec)
+            block_db_sched_duration.labels(**labels).inc(usage.db_sched_duration_sec)
+        except ValueError as exc:
+            logger.warning("Failed to save metrics! Usage: %s Error: %s", usage, exc)
     def get_resource_usage(self) -> ContextResourceUsage:
         """Get the resources used within this Measure block
@@ -215,7 +268,7 @@ class Measure:
         """
         return self._logging_context.get_resource_usage()

-    def _update_in_flight(self, metrics: _InFlightMetric) -> None:
+    def _update_in_flight(self, metrics: _BlockInFlightMetric) -> None:
         """Gets called when processing in flight metrics"""
         assert self.start is not None

         duration = self.clock.time() - self.start
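
Putting the pieces together, a call site would now look roughly like this (a sketch; `hs`, `process_event`, and `event` are illustrative stand-ins, not code from this diff):

```python
# Illustrative call site for the keyword-only Measure API above.
with Measure(hs.get_clock(), name="process_event", server_name=hs.hostname):
    await process_event(event)
```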
+7
@@ -20,6 +20,7 @@
 import functools
 import sys
+from types import GeneratorType
 from typing import Any, Callable, Generator, List, TypeVar, cast

 from typing_extensions import ParamSpec
@@ -151,6 +152,12 @@ def _check_yield_points(
     ) -> Generator["Deferred[object]", object, T]:
         gen = f(*args, **kwargs)

+        # We only patch if we have a native generator function, as we rely on
+        # `gen.gi_frame`.
+        if not isinstance(gen, GeneratorType):
+            ret = yield from gen
+            return ret
+
         last_yield_line_no = gen.gi_frame.f_lineno
         result: Any = None
         while True:
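
A standalone illustration (not Synapse code) of why this guard is needed: only native generators expose the `gi_frame` attribute that the yield-point checker relies on, so anything else must be delegated to unmodified:

```python
# Only native generators have gi_frame; other iterables do not.
from types import GeneratorType

def native():
    yield 1

g = native()
assert isinstance(g, GeneratorType)
assert g.gi_frame is not None  # frame exists until the generator is exhausted

not_a_generator = iter([1])  # e.g. what a non-generator callable might return
assert not isinstance(not_a_generator, GeneratorType)
assert not hasattr(not_a_generator, "gi_frame")  # .gi_frame would raise AttributeError
```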
+1 -1
@@ -672,7 +672,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
             self.assertEqual(edu["edu_type"], EduTypes.DEVICE_LIST_UPDATE)
             c = edu["content"]
             if stream_id is not None:
-                self.assertEqual(c["prev_id"], [stream_id])  # type: ignore[unreachable]
+                self.assertEqual(c["prev_id"], [stream_id])
                 self.assertGreaterEqual(c["stream_id"], stream_id)
             stream_id = c["stream_id"]

         devices = {edu["content"]["device_id"] for edu in self.edus}
+5 -61
@@ -204,46 +204,19 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(events), 2)
         self.assertEqual(events[0].event_id, events[1].event_id)

-    def test_when_empty_prev_events_allowed_create_event_with_empty_prev_events(
+    def test_reject_event_with_empty_prev_events(
         self,
     ) -> None:
-        """When we set allow_no_prev_events=True, should be able to create a
-        event without any prev_events (only auth_events).
-        """
-        # Create a member event we can use as an auth_event
-        memberEvent, _ = self._create_and_persist_member_event()
-
-        # Try to create the event with empty prev_events bit with some auth_events
-        event, _ = self.get_success(
-            self.handler.create_event(
-                self.requester,
-                {
-                    "type": EventTypes.Message,
-                    "room_id": self.room_id,
-                    "sender": self.requester.user.to_string(),
-                    "content": {"msgtype": "m.text", "body": random_string(5)},
-                },
-                # Empty prev_events is the key thing we're testing here
-                prev_event_ids=[],
-                # But with some auth_events
-                auth_event_ids=[memberEvent.event_id],
-                # Allow no prev_events!
-                allow_no_prev_events=True,
-            )
-        )
-        self.assertIsNotNone(event)
-
-    def test_when_empty_prev_events_not_allowed_reject_event_with_empty_prev_events(
-        self,
-    ) -> None:
-        """When we set allow_no_prev_events=False, shouldn't be able to create a
-        event without any prev_events even if it has auth_events. Expect an
-        exception to be raised.
+        """Shouldn't be able to create an event without any `prev_events` even if it
+        has `auth_events`. Expect an exception to be raised.
         """
         # Create a member event we can use as an auth_event
         memberEvent, _ = self._create_and_persist_member_event()

         # Try to create the event with empty prev_events but with some auth_events
+        #
+        # We expect the test to fail because empty prev_events are not allowed
         self.get_failure(
             self.handler.create_event(
                 self.requester,
@@ -257,35 +230,6 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
                 prev_event_ids=[],
                 # But with some auth_events
                 auth_event_ids=[memberEvent.event_id],
-                # We expect the test to fail because empty prev_events are not
-                # allowed here!
-                allow_no_prev_events=False,
             ),
             AssertionError,
         )
-
-    def test_when_empty_prev_events_allowed_reject_event_with_empty_prev_events_and_auth_events(
-        self,
-    ) -> None:
-        """When we set allow_no_prev_events=True, should be able to create a
-        event without any prev_events or auth_events. Expect an exception to be
-        raised.
-        """
-        # Try to create the event with empty prev_events and empty auth_events
-        self.get_failure(
-            self.handler.create_event(
-                self.requester,
-                {
-                    "type": EventTypes.Message,
-                    "room_id": self.room_id,
-                    "sender": self.requester.user.to_string(),
-                    "content": {"msgtype": "m.text", "body": random_string(5)},
-                },
-                prev_event_ids=[],
-                # The event should be rejected when there are no auth_events
-                auth_event_ids=[],
-                # Allow no prev_events!
-                allow_no_prev_events=True,
-            ),
-            AssertionError,
-        )
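
With `allow_no_prev_events` gone, a caller reduces to the shape below (a hedged sketch; `handler`, `requester`, and the event IDs are stand-ins mirroring the test above, not code from this diff):

```python
# Sketch of the create_event call shape once allow_no_prev_events is removed;
# an empty prev_event_ids list is now always rejected.
event, _unpersisted_context = await handler.create_event(
    requester,
    {
        "type": EventTypes.Message,
        "room_id": room_id,
        "sender": requester.user.to_string(),
        "content": {"msgtype": "m.text", "body": "hello"},
    },
    prev_event_ids=[prev_event.event_id],
    auth_event_ids=[member_event.event_id],
)
```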
+1
@@ -86,6 +86,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         self.mock_federation_client = AsyncMock(spec=["put_json"])
         self.mock_federation_client.put_json.return_value = (200, "OK")
         self.mock_federation_client.agent = MatrixFederationAgent(
+            "OUR_STUB_HOMESERVER_NAME",
             reactor,
             tls_client_options_factory=None,
             user_agent=b"SynapseInTrialTest/0.0.0",
@@ -91,6 +91,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
             "test_cache", timer=self.reactor.seconds
         )
         self.well_known_resolver = WellKnownResolver(
+            "OUR_STUB_HOMESERVER_NAME",
             self.reactor,
             Agent(self.reactor, contextFactory=self.tls_factory),
             b"test-agent",
@@ -269,6 +270,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         because it is created too early during setUp
         """
         return MatrixFederationAgent(
+            "OUR_STUB_HOMESERVER_NAME",
             reactor=cast(ISynapseReactor, self.reactor),
             tls_client_options_factory=self.tls_factory,
             user_agent=b"test-agent",  # Note that this is unused since _well_known_resolver is provided.
@@ -1011,6 +1013,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
         # Build a new agent and WellKnownResolver with a different tls factory
         tls_factory = FederationPolicyForHTTPS(config)
         agent = MatrixFederationAgent(
+            "OUR_STUB_HOMESERVER_NAME",
             reactor=self.reactor,
             tls_client_options_factory=tls_factory,
             user_agent=b"test-agent",  # This is unused since _well_known_resolver is passed below.
@@ -1018,6 +1021,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
             ip_blocklist=IPSet(),
             _srv_resolver=self.mock_resolver,
             _well_known_resolver=WellKnownResolver(
+                "OUR_STUB_HOMESERVER_NAME",
                 cast(ISynapseReactor, self.reactor),
                 Agent(self.reactor, contextFactory=tls_factory),
                 b"test-agent",
@@ -68,6 +68,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         reactor, _ = get_clock()
         self.matrix_federation_agent = MatrixFederationAgent(
+            "OUR_STUB_HOMESERVER_NAME",
             reactor,
             tls_client_options_factory=None,
             user_agent=b"SynapseInTrialTest/0.0.0",
-1
@@ -263,7 +263,6 @@ class RedactionTestCase(unittest.HomeserverTestCase):
         @property
         def room_id(self) -> str:
-            assert self._base_builder.room_id is not None
             return self._base_builder.room_id

         @property
-51
@@ -1,51 +0,0 @@
-from synapse.api.errors import SynapseError
-from synapse.types import RoomID
-
-from tests.unittest import TestCase
-
-
-class RoomIDTestCase(TestCase):
-    def test_can_create_msc4291_room_ids(self) -> None:
-        valid_msc4291_room_id = "!31hneApxJ_1o-63DmFrpeqnkFfWppnzWso1JvH3ogLM"
-        room_id = RoomID.from_string(valid_msc4291_room_id)
-        self.assertEquals(RoomID.is_valid(valid_msc4291_room_id), True)
-        self.assertEquals(
-            room_id.to_string(),
-            valid_msc4291_room_id,
-        )
-        self.assertEquals(room_id.id, "!31hneApxJ_1o-63DmFrpeqnkFfWppnzWso1JvH3ogLM")
-        self.assertEquals(room_id.get_domain(), None)
-
-    def test_cannot_create_invalid_msc4291_room_ids(self) -> None:
-        invalid_room_ids = [
-            "!wronglength",
-            "!31hneApxJ_1o-63DmFrpeqnNOTurlsafeBASE64/gLM",
-            "!",
-            "! ",
-        ]
-        for bad_room_id in invalid_room_ids:
-            with self.assertRaises(SynapseError):
-                RoomID.from_string(bad_room_id)
-                if not RoomID.is_valid(bad_room_id):
-                    raise SynapseError(400, "invalid")
-
-    def test_cannot_create_invalid_legacy_room_ids(self) -> None:
-        invalid_room_ids = [
-            "!something:invalid$_chars.com",
-        ]
-        for bad_room_id in invalid_room_ids:
-            with self.assertRaises(SynapseError):
-                RoomID.from_string(bad_room_id)
-                if not RoomID.is_valid(bad_room_id):
-                    raise SynapseError(400, "invalid")
-
-    def test_can_create_valid_legacy_room_ids(self) -> None:
-        valid_room_ids = [
-            "!foo:example.com",
-            "!foo:example.com:8448",
-            "!💩💩💩:example.com",
-        ]
-        for room_id_str in valid_room_ids:
-            room_id = RoomID.from_string(room_id_str)
-            self.assertEquals(RoomID.is_valid(room_id_str), True)
-            self.assertIsNotNone(room_id.get_domain())