Compare commits

Comparing `anoa/doc_h` ... `dmr/storag` (4 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 4d343db081 |  |
|  | a1367dcf8c |  |
|  | 9e361c8550 |  |
|  | 51fec1a534 |  |
.github/workflows/docker.yml (5 changed lines, vendored)

@@ -5,7 +5,7 @@ name: Build docker images
 on:
   push:
     tags: ["v*"]
-    branches: [ master, main, develop ]
+    branches: [ master, main ]
   workflow_dispatch:
 
 permissions:
@@ -38,9 +38,6 @@ jobs:
       id: set-tag
       run: |
         case "${GITHUB_REF}" in
-          refs/heads/develop)
-            tag=develop
-            ;;
          refs/heads/master|refs/heads/main)
            tag=latest
            ;;
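The removed `case` arm above is what gave `develop` builds a dedicated image tag. For clarity, a minimal Python sketch of the ref-to-tag mapping (the `refs/tags/*` arm is an assumption; that part of the workflow is not shown in this diff):

```python
# Sketch of the workflow's tag selection, mirroring the shell `case` statement.
def docker_tag(github_ref: str) -> str:
    if github_ref == "refs/heads/develop":
        return "develop"  # the arm removed by this diff
    if github_ref in ("refs/heads/master", "refs/heads/main"):
        return "latest"
    if github_ref.startswith("refs/tags/"):
        # Assumed: tag pushes (tags: ["v*"]) are tagged with the tag name.
        return github_ref[len("refs/tags/"):]
    raise ValueError(f"unexpected ref: {github_ref}")
```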
CHANGES.md (16 changed lines)

@@ -1,19 +1,3 @@
-Synapse 1.47.0 (2021-11-17)
-===========================
-
-No significant changes since 1.47.0rc3.
-
-
-Synapse 1.47.0rc3 (2021-11-16)
-==============================
-
-Bugfixes
---------
-
-- Fix a bug introduced in 1.47.0rc1 which caused worker processes to not halt startup in the presence of outstanding database migrations. ([\#11346](https://github.com/matrix-org/synapse/issues/11346))
-- Fix a bug introduced in 1.47.0rc1 which prevented the 'remove deleted devices from `device_inbox` column' background process from running when updating from a recent Synapse version. ([\#11303](https://github.com/matrix-org/synapse/issues/11303), [\#11353](https://github.com/matrix-org/synapse/issues/11353))
-
-
 Synapse 1.47.0rc2 (2021-11-10)
 ==============================
 
changelog.d entries (one-line files, deleted)

@@ -1 +0,0 @@
-Add type annotations to `synapse.metrics`.

@@ -1 +0,0 @@
-Experimental support for the thread relation defined in [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440).

@@ -1 +0,0 @@
-Prevent [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical state events from being pushed to an application service via `/transactions`.

@@ -1 +0,0 @@
-Add support for the `/_matrix/client/v3` APIs from Matrix v1.1.

@@ -1 +0,0 @@
-Changed the word 'Home server' as one word 'homeserver' in documentation.

@@ -1 +0,0 @@
-Add dedicated admin API for blocking a room.

@@ -1 +0,0 @@
-Add type hints to `synapse.util`.

@@ -1 +0,0 @@
-Improve type annotations in Synapse's test suite.

@@ -1 +0,0 @@
-Remove deprecated `trust_identity_server_for_password_resets` configuration flag.

@@ -1 +0,0 @@
-Fix a bug, introduced in Synapse 1.46.0, which caused the `check_3pid_auth` and `on_logged_out` callbacks in legacy password authentication provider modules to not be registered. Modules using the generic module API were not affected.

@@ -1 +0,0 @@
-Add type annotations for some methods and properties in the module API.

@@ -1 +0,0 @@
-Add admin API to un-shadow-ban a user.

@@ -1 +0,0 @@
-Add admin API to run background jobs.

@@ -1 +0,0 @@
-Fix a bug introduced in 1.41.0 where space hierarchy responses would be incorrectly reused if multiple users were to make the same request at the same time.
changelog.d/11357.misc (new file, 1 line)

@@ -0,0 +1 @@
+Add a development script for visualising the storage class inheritance hierarchy.
changelog.d entries (more one-line files, deleted)

@@ -1 +0,0 @@
-Require all files in synapse/ and tests/ to pass mypy unless specifically excluded.

@@ -1 +0,0 @@
-Fix running `scripts-dev/complement.sh`, which was broken in v1.47.0rc1.

@@ -1 +0,0 @@
-Rename `get_access_token_for_user_id` to `create_access_token_for_user_id` to better reflect what it does.

@@ -1 +0,0 @@
-Rename `get_refresh_token_for_user_id` to `create_refresh_token_for_user_id` to better describe what it does.

@@ -1 +0,0 @@
-Add support for the `/_matrix/media/v3` APIs from Matrix v1.1.

@@ -1 +0,0 @@
-Fix an issue introduced in v1.47.0 which prevented servers re-joining rooms they had previously left, if their signing keys were replaced.

@@ -1 +0,0 @@
-Publish a `develop` image to dockerhub.

@@ -1 +0,0 @@
-Fix missing quotes for wildcard domains in `federation_certificate_verification_whitelist`.

@@ -1 +0,0 @@
-Keep fallback key marked as used if it's re-uploaded.

@@ -1 +0,0 @@
-Use `auto_attribs` on the `attrs` class `RefreshTokenLookupResult`.

@@ -1 +0,0 @@
-Do not run the broken MSC2716 tests when running `scripts-dev/complement.sh`.

@@ -1 +0,0 @@
-Fix a bug introduced in v1.13.0 where creating and publishing a room could cause errors if `room_list_publication_rules` is configured.

@@ -1 +0,0 @@
-Remove dead code from supporting ACME.
debian/changelog (12 changed lines, vendored)

@@ -1,15 +1,3 @@
-matrix-synapse-py3 (1.47.0) stable; urgency=medium
-
-  * New synapse release 1.47.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 17 Nov 2021 13:09:43 +0000
-
-matrix-synapse-py3 (1.47.0~rc3) stable; urgency=medium
-
-  * New synapse release 1.47.0~rc3.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Nov 2021 14:32:47 +0000
-
 matrix-synapse-py3 (1.47.0~rc2) stable; urgency=medium
 
   [ Dan Callahan ]
@@ -48,7 +48,7 @@ WORKERS_CONFIG = {
         "app": "synapse.app.user_dir",
         "listener_resources": ["client"],
         "endpoint_patterns": [
-            "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
+            "^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
         ],
         "shared_extra_conf": {"update_user_directory": False},
         "worker_extra_conf": "",

@@ -85,10 +85,10 @@ WORKERS_CONFIG = {
         "app": "synapse.app.generic_worker",
         "listener_resources": ["client"],
         "endpoint_patterns": [
-            "^/_matrix/client/(v2_alpha|r0|v3)/sync$",
-            "^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$",
-            "^/_matrix/client/(api/v1|r0|v3)/initialSync$",
-            "^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$",
+            "^/_matrix/client/(v2_alpha|r0)/sync$",
+            "^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
+            "^/_matrix/client/(api/v1|r0)/initialSync$",
+            "^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
         ],
         "shared_extra_conf": {},
         "worker_extra_conf": "",

@@ -146,11 +146,11 @@ WORKERS_CONFIG = {
         "app": "synapse.app.generic_worker",
         "listener_resources": ["client"],
         "endpoint_patterns": [
-            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact",
-            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
-            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
-            "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
-            "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
+            "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
+            "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
+            "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
+            "^/_matrix/client/(api/v1|r0|unstable)/join/",
+            "^/_matrix/client/(api/v1|r0|unstable)/profile/",
         ],
         "shared_extra_conf": {},
         "worker_extra_conf": "",

@@ -158,7 +158,7 @@ WORKERS_CONFIG = {
     "frontend_proxy": {
         "app": "synapse.app.frontend_proxy",
         "listener_resources": ["client", "replication"],
-        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
+        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
         "shared_extra_conf": {},
         "worker_extra_conf": (
             "worker_main_http_uri: http://127.0.0.1:%d"
@@ -50,10 +50,8 @@ build the documentation with:
 mdbook build
 ```
 
-The rendered contents will be outputted to a new `book/` directory at the root of the repository. Please note that
-index.html is not built by default, it is created by copying over the file `welcome_and_overview.html` to `index.html`
-during deployment. Thus, when running `mdbook serve` locally the book will initially show a 404 in place of the index
-due to the above. Do not be alarmed!
+The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
+browse the book by opening `book/index.html` in a web browser.
 
 You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
 
@@ -1,42 +1,42 @@
 # Summary
 
 # Introduction
-- [Welcome and Overview](introduction/welcome_and_overview.md)
+- [Welcome and Overview](welcome_and_overview.md)
 
 # Setup
 - [Installation](setup/installation.md)
-- [Using Postgres](setup/postgres.md)
-- [Configuring a Reverse Proxy](setup/reverse_proxy.md)
+- [Using Postgres](postgres.md)
+- [Configuring a Reverse Proxy](reverse_proxy.md)
 - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
-- [Configuring a Turn Server](setup/turn-howto.md)
-- [Delegation](setup/delegation.md)
+- [Configuring a Turn Server](turn-howto.md)
+- [Delegation](delegate.md)
 
 # Upgrading
 - [Upgrading between Synapse Versions](upgrade.md)
-- [Upgrading from pre-Synapse 1.0](upgrading/upgrading_from_pre_synapse_1.0.md)
+- [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
 
 # Usage
-- [Federation](usage/federation/README.md)
+- [Federation](federate.md)
 - [Configuration](usage/configuration/README.md)
   - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
   - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
-  - [Structured Logging](usage/configuration/structured_logging.md)
-  - [Templates](usage/configuration/templates.md)
+  - [Structured Logging](structured_logging.md)
+  - [Templates](templates.md)
   - [User Authentication](usage/configuration/user_authentication/README.md)
     - [Single-Sign On](usage/configuration/user_authentication/single_sign_on/README.md)
-      - [OpenID Connect](usage/configuration/user_authentication/single_sign_on/openid.md)
+      - [OpenID Connect](openid.md)
       - [SAML](usage/configuration/user_authentication/single_sign_on/saml.md)
       - [CAS](usage/configuration/user_authentication/single_sign_on/cas.md)
-      - [SSO Mapping Providers](usage/configuration/user_authentication/single_sign_on/sso_mapping_providers.md)
-    - [Password Auth Providers](usage/configuration/user_authentication/password_auth_providers.md)
-  - [JSON Web Tokens](usage/configuration/json_web_tokens.md)
-  - [Registration Captcha](usage/configuration/registration_captcha.md)
-  - [Application Services](usage/configuration/application_services.md)
-  - [Server Notices](usage/configuration/server_notices.md)
-  - [Consent Tracking](usage/configuration/consent_tracking.md)
+      - [SSO Mapping Providers](sso_mapping_providers.md)
+    - [Password Auth Providers](password_auth_providers.md)
+  - [JSON Web Tokens](jwt.md)
+  - [Registration Captcha](CAPTCHA_SETUP.md)
+  - [Application Services](application_services.md)
+  - [Server Notices](server_notices.md)
+  - [Consent Tracking](consent_tracking.md)
   - [URL Previews](development/url_previews.md)
-  - [User Directory](usage/configuration/user_directory.md)
-  - [Message Retention Policies](usage/configuration/message_retention_policies.md)
+  - [User Directory](user_directory.md)
+  - [Message Retention Policies](message_retention_policies.md)
 - [Pluggable Modules](modules/index.md)
   - [Writing a module](modules/writing_a_module.md)
     - [Spam checker callbacks](modules/spam_checker_callbacks.md)

@@ -45,8 +45,8 @@
     - [Account validity callbacks](modules/account_validity_callbacks.md)
     - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
     - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
-- [Workers](usage/configuration/workers/README.md)
-  - [Using `synctl` with Workers](usage/configuration/workers/synctl_workers.md)
+- [Workers](workers.md)
+  - [Using `synctl` with Workers](synctl_workers.md)
   - [Systemd](systemd-with-workers/README.md)
 - [Administration](usage/administration/README.md)
   - [Admin API](usage/administration/admin_api/README.md)

@@ -64,33 +64,33 @@
     - [Statistics](admin_api/statistics.md)
     - [Users](admin_api/user_admin_api.md)
     - [Server Version](admin_api/version_api.md)
-  - [Manhole](usage/administration/manhole.md)
-  - [Monitoring](usage/administration/monitoring.md)
+  - [Manhole](manhole.md)
+  - [Monitoring](metrics-howto.md)
   - [Request log format](usage/administration/request_log.md)
   - [Scripts]()
 
 # Development
 - [Contributing Guide](development/contributing_guide.md)
-- [Code Style](development/code_style.md)
+- [Code Style](code_style.md)
 - [Git Usage](development/git.md)
 - [Testing]()
-- [OpenTracing](development/opentracing.md)
+- [OpenTracing](opentracing.md)
 - [Database Schemas](development/database_schema.md)
 - [Experimental features](development/experimental_features.md)
 - [Synapse Architecture]()
-  - [Log Contexts](development/synapse_architecture/log_contexts.md)
-  - [Replication](development/synapse_architecture/replication.md)
-  - [TCP Replication](development/synapse_architecture/tcp_replication.md)
+  - [Log Contexts](log_contexts.md)
+  - [Replication](replication.md)
+  - [TCP Replication](tcp_replication.md)
 - [Internal Documentation](development/internal_documentation/README.md)
   - [Single Sign-On]()
     - [SAML](development/saml.md)
    - [CAS](development/cas.md)
   - [Room DAG concepts](development/room-dag-concepts.md)
   - [State Resolution]()
-    - [The Auth Chain Difference Algorithm](development/internal_documentation/state_resolution/auth_chain_difference_algorithm.md)
-  - [Media Repository](development/internal_documentation/media_repository.md)
-  - [Room and User Statistics](development/internal_documentation/room_and_user_statistics.md)
+    - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
+  - [Media Repository](media_repository.md)
+  - [Room and User Statistics](room_and_user_statistics.md)
 - [Scripts]()
 
 # Other
-- [Dependency Deprecation Policy](other/dependency_deprecation_policy.md)
+- [Dependency Deprecation Policy](deprecation_policy.md)
@@ -3,7 +3,6 @@
 - [Room Details API](#room-details-api)
 - [Room Members API](#room-members-api)
 - [Room State API](#room-state-api)
-- [Block Room API](#block-room-api)
 - [Delete Room API](#delete-room-api)
   * [Version 1 (old version)](#version-1-old-version)
   * [Version 2 (new version)](#version-2-new-version)
@@ -387,83 +386,6 @@ A response body like the following is returned:
 }
 ```
 
-# Block Room API
-The Block Room admin API allows server admins to block and unblock rooms,
-and query to see if a given room is blocked.
-This API can be used to pre-emptively block a room, even if it's unknown to this
-homeserver. Users will be prevented from joining a blocked room.
-
-## Block or unblock a room
-
-The API is:
-
-```
-PUT /_synapse/admin/v1/rooms/<room_id>/block
-```
-
-with a body of:
-
-```json
-{
-    "block": true
-}
-```
-
-A response body like the following is returned:
-
-```json
-{
-    "block": true
-}
-```
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- `room_id` - The ID of the room.
-
-The following JSON body parameters are available:
-
-- `block` - If `true` the room will be blocked and if `false` the room will be unblocked.
-
-**Response**
-
-The following fields are possible in the JSON response body:
-
-- `block` - A boolean. `true` if the room is blocked, otherwise `false`
-
-## Get block status
-
-The API is:
-
-```
-GET /_synapse/admin/v1/rooms/<room_id>/block
-```
-
-A response body like the following is returned:
-
-```json
-{
-    "block": true,
-    "user_id": "<user_id>"
-}
-```
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- `room_id` - The ID of the room.
-
-**Response**
-
-The following fields are possible in the JSON response body:
-
-- `block` - A boolean. `true` if the room is blocked, otherwise `false`
-- `user_id` - An optional string. If the room is blocked (`block` is `true`) shows
-  the user who has add the room to blocking list. Otherwise it is not displayed.
-
 # Delete Room API
 
 The Delete Room admin API allows server admins to remove rooms from the server
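Since the diff removes the Block Room documentation wholesale, here is a minimal sketch of what the documented calls look like from Python, using `requests`; the homeserver URL, room ID, and admin token are placeholders, not values from this changeset:

```python
import requests

HOMESERVER = "https://synapse.example.com"  # placeholder
ROOM_ID = "!roomid:example.com"             # placeholder
HEADERS = {"Authorization": "Bearer <admin_access_token>"}

# Block the room (PUT with {"block": true}), per the removed docs above.
resp = requests.put(
    f"{HOMESERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/block",
    json={"block": True},
    headers=HEADERS,
)
print(resp.json())  # e.g. {"block": true}

# Query the block status.
resp = requests.get(
    f"{HOMESERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/block", headers=HEADERS
)
print(resp.json())  # e.g. {"block": true, "user_id": "@admin:example.com"}
```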
@@ -948,7 +948,7 @@ The following fields are returned in the JSON response body:
 See also the
 [Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers).
 
-## Controlling whether a user is shadow-banned
+## Shadow-banning users
 
 Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
 A shadow-banned users receives successful responses to their client-server API requests,

@@ -961,22 +961,16 @@ or broken behaviour for the client. A shadow-banned user will not receive any
 notification and it is generally more appropriate to ban or kick abusive users.
 A shadow-banned user will be unable to contact anyone on the server.
 
-To shadow-ban a user the API is:
+The API is:
 
 ```
 POST /_synapse/admin/v1/users/<user_id>/shadow_ban
 ```
 
-To un-shadow-ban a user the API is:
-
-```
-DELETE /_synapse/admin/v1/users/<user_id>/shadow_ban
-```
-
 To use it, you will need to authenticate by providing an `access_token` for a
 server admin: [Admin API](../usage/administration/admin_api)
 
-An empty JSON dict is returned in both cases.
+An empty JSON dict is returned.
 
 **Parameters**
 
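A hedged sketch of the shadow-ban endpoints discussed above, again with `requests` and placeholder values; the `DELETE` variant is the one whose documentation this diff removes:

```python
import requests

url = (
    "https://synapse.example.com"  # placeholder
    "/_synapse/admin/v1/users/@spammer:example.com/shadow_ban"
)
headers = {"Authorization": "Bearer <admin_access_token>"}

requests.post(url, headers=headers)    # shadow-ban; returns an empty JSON dict
requests.delete(url, headers=headers)  # un-shadow-ban (docs removed in this diff)
```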
@@ -23,7 +23,7 @@ Server with a domain specific API.
 
 1. **Messaging Layer**
 
-   This is what the rest of the homeserver hits to send messages, join rooms,
+   This is what the rest of the Home Server hits to send messages, join rooms,
    etc. It also allows you to register callbacks for when it get's notified by
    lower levels that e.g. a new message has been received.
 
(Binary image file changed: 41 KiB before, 41 KiB after.)
@@ -1,7 +1,7 @@
 <h2 style="color:red">
 This page of the Synapse documentation is now deprecated. For up to date
 documentation on setting up or writing a password auth provider module, please see
-<a href="modules/index.md">this page</a>.
+<a href="modules.md">this page</a>.
 </h2>
 
 # Password auth provider modules
@@ -647,8 +647,8 @@ retention:
 #
 #federation_certificate_verification_whitelist:
 # - lon.example.com
-# - "*.domain.com"
-# - "*.onion"
+# - *.domain.com
+# - *.onion
 
 # List of custom certificate authorities for federation traffic.
 #
@@ -2360,8 +2360,8 @@ user_directory:
 # indexes were (re)built was before Synapse 1.44, you'll have to
 # rebuild the indexes in order to search through all known users.
 # These indexes are built the first time Synapse starts; admins can
-# manually trigger a rebuild via API following the instructions at
-# https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
+# manually trigger a rebuild following the instructions at
+# https://matrix-org.github.io/synapse/latest/user_directory.html
 #
 # Uncomment to return search results containing all known users, even if that
 # user does not share a room with the requester.
@@ -1,12 +1,12 @@
 # Overview
 
-This document explains how to enable VoIP relaying on your homeserver with
+This document explains how to enable VoIP relaying on your Home Server with
 TURN.
 
-The synapse Matrix homeserver supports integration with TURN server via the
+The synapse Matrix Home Server supports integration with TURN server via the
 [TURN server REST API](<https://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
-allows the homeserver to generate credentials that are valid for use on the
-TURN server through the use of a secret shared between the homeserver and the
+allows the Home Server to generate credentials that are valid for use on the
+TURN server through the use of a secret shared between the Home Server and the
 TURN server.
 
 The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.

@@ -171,10 +171,10 @@ Your homeserver configuration file needs the following extra keys:
    for your TURN server to be given out to your clients. Add separate
    entries for each transport your TURN server supports.
 2. "`turn_shared_secret`": This is the secret shared between your
-   homeserver and your TURN server, so you should set it to the same
+   Home server and your TURN server, so you should set it to the same
    string you used in turnserver.conf.
 3. "`turn_user_lifetime`": This is the amount of time credentials
-   generated by your homeserver are valid for (in milliseconds).
+   generated by your Home Server are valid for (in milliseconds).
    Shorter times offer less potential for abuse at the expense of
    increased traffic between web clients and your home server to
    refresh credentials. The TURN REST API specification recommends

@@ -220,7 +220,7 @@ Here are a few things to try:
   anyone who has successfully set this up.
 
 * Check that you have opened your firewall to allow TCP and UDP traffic to the
-  TURN ports (normally 3478 and 5349).
+  TURN ports (normally 3478 and 5479).
 
 * Check that you have opened your firewall to allow UDP traffic to the UDP
   relay ports (49152-65535 by default).
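For context on the shared-secret mechanism the TURN howto describes: under the cited TURN REST API draft, the homeserver mints ephemeral credentials by HMAC-ing a timestamped username with `turn_shared_secret`. A minimal sketch (illustrative, not Synapse's actual implementation):

```python
import base64
import hashlib
import hmac
import time

def turn_credentials(shared_secret: str, user: str, lifetime_s: int = 86400):
    # Username encodes the expiry; password is HMAC-SHA1 over it, keyed with
    # the secret that both the homeserver and coturn are configured with.
    username = f"{int(time.time()) + lifetime_s}:{user}"
    digest = hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1).digest()
    return username, base64.b64encode(digest).decode()
```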
@@ -42,6 +42,7 @@ For each update:
 `average_items_per_ms` how many items are processed per millisecond based on an exponential average.
 
 
+
 ## Enabled
 
 This API allow pausing background updates.

@@ -81,29 +82,3 @@ The API returns the `enabled` param.
 ```
 
 There is also a `GET` version which returns the `enabled` state.
-
-
-## Run
-
-This API schedules a specific background update to run. The job starts immediately after calling the API.
-
-
-The API is:
-
-```
-POST /_synapse/admin/v1/background_updates/start_job
-```
-
-with the following body:
-
-```json
-{
-    "job_name": "populate_stats_process_rooms"
-}
-```
-
-The following JSON body parameters are available:
-
-- `job_name` - A string which job to run. Valid values are:
-  - `populate_stats_process_rooms` - Recalculate the stats for all rooms.
-  - `regenerate_directory` - Recalculate the [user directory](../../../user_directory.md) if it is stale or out of sync.
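The removed "Run" section documents `POST /_synapse/admin/v1/background_updates/start_job`; a minimal sketch of invoking it with `requests` (placeholder URL and token):

```python
import requests

resp = requests.post(
    "https://synapse.example.com/_synapse/admin/v1/background_updates/start_job",
    json={"job_name": "regenerate_directory"},  # or "populate_stats_process_rooms"
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
```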
@@ -7,8 +7,8 @@ who are present in a publicly viewable room present on the server.
 
 The directory info is stored in various tables, which can (typically after
 DB corruption) get stale or out of sync. If this happens, for now the
-solution to fix it is to use the [admin API](usage/administration/admin_api/background_updates.md#run)
-and execute the job `regenerate_directory`. This should then start a background task to
+solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql)
+and then restart synapse. This should then start a background task to
 flush the current tables and regenerate the directory.
 
 Data model
@@ -182,10 +182,10 @@ This worker can handle API requests matching the following regular
 expressions:
 
     # Sync requests
-    ^/_matrix/client/(v2_alpha|r0|v3)/sync$
-    ^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$
-    ^/_matrix/client/(api/v1|r0|v3)/initialSync$
-    ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$
+    ^/_matrix/client/(v2_alpha|r0)/sync$
+    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
+    ^/_matrix/client/(api/v1|r0)/initialSync$
+    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$
 
     # Federation requests
     ^/_matrix/federation/v1/event/

@@ -216,40 +216,40 @@ expressions:
     ^/_matrix/federation/v1/send/
 
     # Client API requests
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
+    ^/_matrix/client/(api/v1|r0|unstable)/createRoom$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
     ^/_matrix/client/unstable/org.matrix.msc2946/rooms/.*/spaces$
     ^/_matrix/client/unstable/org.matrix.msc2946/rooms/.*/hierarchy$
     ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/devices$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/query$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/changes$
+    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
+    ^/_matrix/client/(api/v1|r0|unstable)/devices$
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
     ^/_matrix/client/versions$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_groups$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/publicised_groups$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/publicised_groups/
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/search$
+    ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
+    ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
+    ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/
+    ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$
+    ^/_matrix/client/(api/v1|r0|unstable)/search$
 
     # Registration/login requests
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
-    ^/_matrix/client/(r0|v3|unstable)/register$
+    ^/_matrix/client/(api/v1|r0|unstable)/login$
+    ^/_matrix/client/(r0|unstable)/register$
     ^/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity$
 
     # Event sending requests
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/join/
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/
+    ^/_matrix/client/(api/v1|r0|unstable)/profile/
 
 
 Additionally, the following REST endpoints can be handled for GET requests:

@@ -261,14 +261,14 @@ room must be routed to the same instance. Additionally, care must be taken to
 ensure that the purge history admin API is not used while pagination requests
 for the room are in flight:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
 
 Additionally, the following endpoints should be included if Synapse is configured
 to use SSO (you only need to include the ones for whichever SSO provider you're
 using):
 
     # for all SSO providers
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/login/sso/redirect
+    ^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect
     ^/_synapse/client/pick_idp$
     ^/_synapse/client/pick_username
     ^/_synapse/client/new_user_consent$

@@ -281,7 +281,7 @@ using):
     ^/_synapse/client/saml2/authn_response$
 
     # CAS requests.
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/login/cas/ticket$
+    ^/_matrix/client/(api/v1|r0|unstable)/login/cas/ticket$
 
 Ensure that all SSO logins go to a single process.
 For multiple workers not handling the SSO endpoints properly, see

@@ -465,7 +465,7 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for
 Handles searches in the user directory. It can handle REST endpoints matching
 the following regular expressions:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$
+    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
 
 When using this worker you must also set `update_user_directory: False` in the
 shared configuration file to stop the main synapse running background

@@ -477,12 +477,12 @@ Proxies some frequently-requested client endpoints to add caching and remove
 load from the main synapse. It can handle REST endpoints matching the following
 regular expressions:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload
 
 If `use_presence` is False in the homeserver config, it can also handle REST
 endpoints matching the following regular expressions:
 
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status
+    ^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status
 
 This "stub" presence handler will pass through `GET` request but make the
 `PUT` effectively a no-op.
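These worker regexes are meant for a reverse proxy's routing table. A quick way to sanity-check a path against them (illustrative; Synapse itself does not route this way):

```python
import re

SYNC_PATTERNS = [
    r"^/_matrix/client/(v2_alpha|r0)/sync$",
    r"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
]

def handled_by_sync_worker(path: str) -> bool:
    return any(re.match(p, path) for p in SYNC_PATTERNS)

assert handled_by_sync_worker("/_matrix/client/r0/sync")
assert not handled_by_sync_worker("/_matrix/client/v3/sync")  # v3 is dropped on this side of the diff
```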
mypy.ini (90 changed lines)

@@ -160,9 +160,6 @@ disallow_untyped_defs = True
 [mypy-synapse.handlers.*]
 disallow_untyped_defs = True
 
-[mypy-synapse.metrics.*]
-disallow_untyped_defs = True
-
 [mypy-synapse.push.*]
 disallow_untyped_defs = True
 
@@ -199,11 +196,92 @@ disallow_untyped_defs = True
 [mypy-synapse.streams.*]
 disallow_untyped_defs = True
 
-[mypy-synapse.util.*]
+[mypy-synapse.util.batching_queue]
 disallow_untyped_defs = True
 
-[mypy-synapse.util.caches.treecache]
-disallow_untyped_defs = False
+[mypy-synapse.util.caches.cached_call]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.dictionary_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.lrucache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.response_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.stream_change_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.ttl_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.daemonize]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.file_consumer]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.frozenutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.hash]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.httpresourcetree]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.iterutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.linked_list]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.logcontext]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.logformatter]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.macaroons]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.manhole]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.module_loader]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.msisdn]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.patch_inline_callbacks]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.ratelimitutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.retryutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.rlimit]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.stringutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.templates]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.threepids]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.wheel_timer]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.versionstring]
+disallow_untyped_defs = True
+
 [mypy-tests.handlers.test_user_directory]
 disallow_untyped_defs = True
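What the per-module `disallow_untyped_defs = True` flag buys: mypy rejects any function in that module without annotations. For example:

```python
# Under disallow_untyped_defs = True, mypy reports the first definition as
# "Function is missing a type annotation"; the second passes.
def scale(x, factor):
    return x * factor

def scale_typed(x: float, factor: float) -> float:
    return x * factor
```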
@@ -24,7 +24,7 @@
 set -e
 
 # Change to the repository root
-cd "$(dirname $0)/.."
+cd "$(dirname "$0")/.."
 
 # Check for a user-specified Complement checkout
 if [[ -z "$COMPLEMENT_DIR" ]]; then

@@ -61,8 +61,8 @@ cd "$COMPLEMENT_DIR"
 EXTRA_COMPLEMENT_ARGS=""
 if [[ -n "$1" ]]; then
   # A test name regex has been set, supply it to Complement
-  EXTRA_COMPLEMENT_ARGS+="-run $1 "
+  EXTRA_COMPLEMENT_ARGS=(-run "$1")
 fi
 
 # Run the tests!
-go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403,msc2716 -count=1 "${EXTRA_COMPLEMENT_ARGS[@]}" ./tests/...
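The `EXTRA_COMPLEMENT_ARGS` array on the right-hand side keeps a test-name regex containing spaces as a single `-run` argument. The same principle in Python, for comparison; the test name below is fabricated:

```python
import subprocess

test_filter = "TestRestart/Message sent to newly joined user"  # contains spaces
# Each list element arrives as exactly one argument; no word-splitting occurs,
# unlike splicing the string into a flat command line.
subprocess.run(["go", "test", "-run", test_filter, "./tests/..."], check=True)
```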
scripts-dev/storage_inheritance.py (new executable file, 179 lines)

@@ -0,0 +1,179 @@
+#! /usr/bin/env python3
+import argparse
+import os
+import re
+import subprocess
+import sys
+import tempfile
+from typing import Iterable, Optional, Set
+
+import networkx
+
+
+def scrape_storage_classes() -> str:
+    """Grep for classes ending with "Store" and extract their list of parents.
+
+    Returns the stdout from `rg` as a single string."""
+    # TODO: this is a big hack which assumes that each Store class has a unique name.
+    # That assumption is wrong: there are two DirectoryStores, one in
+    # synapse/replication/slave/storage/directory.py and the other in
+    # synapse/storage/databases/main/directory.py
+    # Would be nice to have a way to account for this.
+    return subprocess.check_output(
+        [
+            "rg",
+            "-o",
+            "--no-line-number",
+            "--no-filename",
+            "--multiline",
+            r"class .*Store\((.|\n)*?\):$",
+            "synapse",
+            "tests",
+        ],
+    ).decode()
+
+
+oneline_class_pattern = re.compile(r"^class (.*)\((.*)\):$")
+opening_class_pattern = re.compile(r"^class (.*)\($")
+
+
+def load_graph(lines: Iterable[str]) -> networkx.DiGraph:
+    """Process the output of scrape_storage_classes to build an inheritance graph.
+
+    Every time a class C is created that explicitly inherits from a parent P, we add an
+    edge C -> P.
+    """
+    G = networkx.DiGraph()
+    child: Optional[str] = None
+
+    for line in lines:
+        line = line.strip()
+        if not line or line.startswith("#"):
+            continue
+        if (match := oneline_class_pattern.match(line)) is not None:
+            child, parents = match.groups()
+            for parent in parents.split(", "):
+                if "metaclass" not in parent:
+                    G.add_edge(child, parent)
+
+            child = None
+        elif (match := opening_class_pattern.match(line)) is not None:
+            (child,) = match.groups()
+        elif line == "):":
+            child = None
+        else:
+            assert child is not None, repr(line)
+            parent = line.strip(",")
+            if "metaclass" not in parent:
+                G.add_edge(child, parent)
+
+    return G
+
+
+def select_vertices_of_interest(G: networkx.DiGraph, target: Optional[str]) -> Set[str]:
+    """Find all nodes we want to visualise.
+
+    If no TARGET is given, we visualise all of G. Otherwise we visualise a given
+    TARGET, its parents, and all of their parents recursively.
+
+    Requires that G is a DAG.
+    If not None, the TARGET must belong to G.
+    """
+    assert networkx.is_directed_acyclic_graph(G)
+    if target is not None:
+        component: Set[str] = networkx.descendants(G, target)
+        component.add(target)
+    else:
+        component = set(G.nodes)
+    return component
+
+
+def generate_dot_source(G: networkx.DiGraph, nodes: Set[str]) -> str:
+    output = """\
+strict digraph {
+rankdir="LR";
+node [shape=box];
+
+"""
+    for (child, parent) in G.edges:
+        if child in nodes and parent in nodes:
+            output += f" {child} -> {parent};\n"
+    output += "}\n"
+    return output
+
+
+def render_png(dot_source: str, destination: Optional[str]) -> str:
+    if destination is None:
+        handle, destination = tempfile.mkstemp()
+        os.close(handle)
+        print("Warning: writing to", destination, "which will persist", file=sys.stderr)
+
+    subprocess.run(
+        [
+            "dot",
+            "-o",
+            destination,
+            "-Tpng",
+        ],
+        input=dot_source,
+        encoding="utf-8",
+        check=True,
+    )
+    return destination
+
+
+def show_graph(location: str) -> None:
+    subprocess.run(
+        ["xdg-open", location],
+        check=True,
+    )
+
+
+def main(parser: argparse.ArgumentParser, args: argparse.Namespace) -> int:
+    if not (args.output or args.show):
+        parser.print_help(file=sys.stderr)
+        print("Must either --output or --show, or both.", file=sys.stderr)
+        return os.EX_USAGE
+
+    lines = scrape_storage_classes().split("\n")
+    G = load_graph(lines)
+    nodes = select_vertices_of_interest(G, args.target)
+    dot_source = generate_dot_source(G, nodes)
+    output_location = render_png(dot_source, args.output)
+    if args.show:
+        show_graph(output_location)
+    return os.EX_OK
+
+
+def build_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        description="Visualise the inheritance of Synapse's storage classes. Requires "
+        "ripgrep (https://github.com/BurntSushi/ripgrep) as 'rg'; graphviz "
+        "(https://graphviz.org/) for the 'dot' program; and networkx "
+        "(https://networkx.org/). Requires Python 3.8+ for the walrus "
+        "operator."
+    )
+    parser.add_argument(
+        "target",
+        nargs="?",
+        help="Show only TARGET and its ancestors. Otherwise, show the entire hierarchy.",
+    )
+    parser.add_argument(
+        "--output",
+        nargs=1,
+        help="Render inheritance graph to a png file.",
+    )
+    parser.add_argument(
+        "--show",
+        action="store_true",
+        help="Open the inheritance graph in an image viewer.",
+    )
+    return parser
+
+
+if __name__ == "__main__":
+    parser = build_parser()
+    args = parser.parse_args()
+    sys.exit(main(parser, args))
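A quick check of `load_graph` on a fabricated `rg` match (not real script output) shows the intended shape of the parse: a multi-line class header yields one edge per parent:

```python
sample = """\
class RoomStore(
    RoomBackgroundUpdateStore,
    CacheInvalidationWorkerStore,
):
"""
G = load_graph(sample.split("\n"))
assert set(G.edges) == {
    ("RoomStore", "RoomBackgroundUpdateStore"),
    ("RoomStore", "CacheInvalidationWorkerStore"),
}
```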
setup.py (2 changed lines)

@@ -135,6 +135,8 @@ CONDITIONAL_REQUIREMENTS["dev"] = (
     # The following are executed as commands by the release script.
     "twine",
     "towncrier",
+    # For storage_inheritance script
+    "networkx==2.6.3",
     ]
 )
 
@@ -47,7 +47,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.47.0"
+__version__ = "1.47.0rc2"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
@@ -30,8 +30,7 @@ FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
-MEDIA_R0_PREFIX = "/_matrix/media/r0"
-MEDIA_V3_PREFIX = "/_matrix/media/v3"
+MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
 
 
@@ -402,7 +402,7 @@ async def start(hs: "HomeServer") -> None:
     if hasattr(signal, "SIGHUP"):
 
         @wrap_as_background_process("sighup")
-        async def handle_sighup(*args: Any, **kwargs: Any) -> None:
+        def handle_sighup(*args: Any, **kwargs: Any) -> None:
             # Tell systemd our state, if we're using it. This will silently fail if
             # we're not using systemd.
             sdnotify(b"RELOADING=1")
@@ -26,8 +26,7 @@ from synapse.api.urls import (
     CLIENT_API_PREFIX,
     FEDERATION_PREFIX,
     LEGACY_MEDIA_PREFIX,
-    MEDIA_R0_PREFIX,
-    MEDIA_V3_PREFIX,
+    MEDIA_PREFIX,
     SERVER_KEY_V2_PREFIX,
 )
 from synapse.app import _base

@@ -339,8 +338,7 @@ class GenericWorkerServer(HomeServer):
 
             resources.update(
                 {
-                    MEDIA_R0_PREFIX: media_repo,
-                    MEDIA_V3_PREFIX: media_repo,
+                    MEDIA_PREFIX: media_repo,
                     LEGACY_MEDIA_PREFIX: media_repo,
                     "/_synapse/admin": admin_resource,
                 }
@@ -29,8 +29,7 @@ from synapse import events
|
|||||||
from synapse.api.urls import (
|
from synapse.api.urls import (
|
||||||
FEDERATION_PREFIX,
|
FEDERATION_PREFIX,
|
||||||
LEGACY_MEDIA_PREFIX,
|
LEGACY_MEDIA_PREFIX,
|
||||||
MEDIA_R0_PREFIX,
|
MEDIA_PREFIX,
|
||||||
MEDIA_V3_PREFIX,
|
|
||||||
SERVER_KEY_V2_PREFIX,
|
SERVER_KEY_V2_PREFIX,
|
||||||
STATIC_PREFIX,
|
STATIC_PREFIX,
|
||||||
WEB_CLIENT_PREFIX,
|
WEB_CLIENT_PREFIX,
|
||||||
@@ -194,7 +193,6 @@ class SynapseHomeServer(HomeServer):
                 {
                     "/_matrix/client/api/v1": client_resource,
                     "/_matrix/client/r0": client_resource,
-                    "/_matrix/client/v3": client_resource,
                     "/_matrix/client/unstable": client_resource,
                     "/_matrix/client/v2_alpha": client_resource,
                     "/_matrix/client/versions": client_resource,

@@ -246,11 +244,7 @@ class SynapseHomeServer(HomeServer):
         if self.config.server.enable_media_repo:
             media_repo = self.get_media_repository_resource()
             resources.update(
-                {
-                    MEDIA_R0_PREFIX: media_repo,
-                    MEDIA_V3_PREFIX: media_repo,
-                    LEGACY_MEDIA_PREFIX: media_repo,
-                }
+                {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
             )
         elif name == "media":
             raise ConfigError(

@@ -231,32 +231,13 @@ class ApplicationServiceApi(SimpleHttpClient):
                 json_body=body,
                 args={"access_token": service.hs_token},
             )
-            if logger.isEnabledFor(logging.DEBUG):
-                logger.debug(
-                    "push_bulk to %s succeeded! events=%s",
-                    uri,
-                    [event.get("event_id") for event in events],
-                )
             sent_transactions_counter.labels(service.id).inc()
             sent_events_counter.labels(service.id).inc(len(events))
             return True
         except CodeMessageException as e:
-            logger.warning(
-                "push_bulk to %s received code=%s msg=%s",
-                uri,
-                e.code,
-                e.msg,
-                exc_info=logger.isEnabledFor(logging.DEBUG),
-            )
+            logger.warning("push_bulk to %s received %s", uri, e.code)
         except Exception as ex:
-            logger.warning(
-                "push_bulk to %s threw exception(%s) %s args=%s",
-                uri,
-                type(ex).__name__,
-                ex,
-                ex.args,
-                exc_info=logger.isEnabledFor(logging.DEBUG),
-            )
+            logger.warning("push_bulk to %s threw exception %s", uri, ex)
         failed_transactions_counter.labels(service.id).inc()
         return False

@@ -137,13 +137,32 @@ class EmailConfig(Config):
             if self.root.registration.account_threepid_delegate_email
             else ThreepidBehaviour.LOCAL
         )

-        if config.get("trust_identity_server_for_password_resets"):
-            raise ConfigError(
-                'The config option "trust_identity_server_for_password_resets" '
-                'has been replaced by "account_threepid_delegate". '
-                "Please consult the sample config at docs/sample_config.yaml for "
-                "details and update your config file."
-            )
+        # Prior to Synapse v1.4.0, there was another option that defined whether Synapse would
+        # use an identity server to password reset tokens on its behalf. We now warn the user
+        # if they have this set and tell them to use the updated option, while using a default
+        # identity server in the process.
+        self.using_identity_server_from_trusted_list = False
+        if (
+            not self.root.registration.account_threepid_delegate_email
+            and config.get("trust_identity_server_for_password_resets", False) is True
+        ):
+            # Use the first entry in self.trusted_third_party_id_servers instead
+            if self.trusted_third_party_id_servers:
+                # XXX: It's a little confusing that account_threepid_delegate_email is modified
+                # both in RegistrationConfig and here. We should factor this bit out
+
+                first_trusted_identity_server = self.trusted_third_party_id_servers[0]
+
+                # trusted_third_party_id_servers does not contain a scheme whereas
+                # account_threepid_delegate_email is expected to. Presume https
+                self.root.registration.account_threepid_delegate_email = (
+                    "https://" + first_trusted_identity_server
+                )
+                self.using_identity_server_from_trusted_list = True
+            else:
+                raise ConfigError(
+                    "Attempted to use an identity server from"
+                    '"trusted_third_party_id_servers" but it is empty.'
+                )

         self.local_threepid_handling_disabled_due_to_email_config = False

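Reading the two sides of this hunk together: the base side rejects the deprecated `trust_identity_server_for_password_resets` flag outright, while the dmr/storag side still resolves it against the trusted-server list restored in the RegistrationConfig hunk below. A minimal sketch of how that fallback resolves under the default list (an illustration, not a config recommendation):

    # trusted_third_party_id_servers defaults to ["matrix.org", "vector.im"],
    # so with the deprecated flag set and no explicit delegate configured:
    first_trusted_identity_server = "matrix.org"
    account_threepid_delegate_email = "https://" + first_trusted_identity_server
    # -> "https://matrix.org"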
@@ -39,7 +39,9 @@ class RegistrationConfig(Config):
         self.registration_shared_secret = config.get("registration_shared_secret")

         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
+        self.trusted_third_party_id_servers = config.get(
+            "trusted_third_party_id_servers", ["matrix.org", "vector.im"]
+        )
         account_threepid_delegates = config.get("account_threepid_delegates") or {}
         self.account_threepid_delegate_email = account_threepid_delegates.get("email")
         self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")

@@ -1,5 +1,4 @@
 # Copyright 2018 New Vector Ltd
-# Copyright 2021 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -13,9 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import List
-
-from synapse.types import JsonDict
 from synapse.util import glob_to_regex

 from ._base import Config, ConfigError

@@ -24,7 +20,7 @@ from ._base import Config, ConfigError
 class RoomDirectoryConfig(Config):
     section = "roomdirectory"

-    def read_config(self, config, **kwargs) -> None:
+    def read_config(self, config, **kwargs):
         self.enable_room_list_search = config.get("enable_room_list_search", True)

         alias_creation_rules = config.get("alias_creation_rules")

@@ -51,7 +47,7 @@ class RoomDirectoryConfig(Config):
             _RoomDirectoryRule("room_list_publication_rules", {"action": "allow"})
         ]

-    def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str:
+    def generate_config_section(self, config_dir_path, server_name, **kwargs):
         return """
         # Uncomment to disable searching the public room list. When disabled
         # blocks searching local and remote room lists for local and remote

@@ -117,16 +113,16 @@ class RoomDirectoryConfig(Config):
         #     action: allow
         """

-    def is_alias_creation_allowed(self, user_id: str, room_id: str, alias: str) -> bool:
+    def is_alias_creation_allowed(self, user_id, room_id, alias):
         """Checks if the given user is allowed to create the given alias

         Args:
-            user_id: The user to check.
-            room_id: The room ID for the alias.
-            alias: The alias being created.
+            user_id (str)
+            room_id (str)
+            alias (str)

         Returns:
-            True if user is allowed to create the alias
+            boolean: True if user is allowed to create the alias
         """
         for rule in self._alias_creation_rules:
             if rule.matches(user_id, room_id, [alias]):

@@ -134,18 +130,16 @@ class RoomDirectoryConfig(Config):

         return False

-    def is_publishing_room_allowed(
-        self, user_id: str, room_id: str, aliases: List[str]
-    ) -> bool:
+    def is_publishing_room_allowed(self, user_id, room_id, aliases):
         """Checks if the given user is allowed to publish the room

         Args:
-            user_id: The user ID publishing the room.
-            room_id: The room being published.
-            aliases: any local aliases associated with the room
+            user_id (str)
+            room_id (str)
+            aliases (list[str]): any local aliases associated with the room

         Returns:
-            True if user can publish room
+            boolean: True if user can publish room
         """
         for rule in self._room_list_publication_rules:
             if rule.matches(user_id, room_id, aliases):

@@ -159,11 +153,11 @@ class _RoomDirectoryRule:
     creating an alias or publishing a room.
     """

-    def __init__(self, option_name: str, rule: JsonDict):
+    def __init__(self, option_name, rule):
         """
         Args:
-            option_name: Name of the config option this rule belongs to
-            rule: The rule as specified in the config
+            option_name (str): Name of the config option this rule belongs to
+            rule (dict): The rule as specified in the config
         """

         action = rule["action"]

@@ -187,18 +181,18 @@ class _RoomDirectoryRule:
         except Exception as e:
             raise ConfigError("Failed to parse glob into regex") from e

-    def matches(self, user_id: str, room_id: str, aliases: List[str]) -> bool:
+    def matches(self, user_id, room_id, aliases):
         """Tests if this rule matches the given user_id, room_id and aliases.

         Args:
-            user_id: The user ID to check.
-            room_id: The room ID to check.
-            aliases: The associated aliases to the room. Will be a single element
-                for testing alias creation, and can be empty for testing room
-                publishing.
+            user_id (str)
+            room_id (str)
+            aliases (list[str]): The associated aliases to the room. Will be a
+                single element for testing alias creation, and can be empty for
+                testing room publishing.

         Returns:
-            True if the rule matches.
+            boolean
         """

         # Note: The regexes are anchored at both ends

@@ -14,6 +14,7 @@

 import logging
 import os
+from datetime import datetime
 from typing import List, Optional, Pattern

 from OpenSSL import SSL, crypto

@@ -132,6 +133,55 @@ class TlsConfig(Config):
         self.tls_certificate: Optional[crypto.X509] = None
         self.tls_private_key: Optional[crypto.PKey] = None

+    def is_disk_cert_valid(self, allow_self_signed=True):
+        """
+        Is the certificate we have on disk valid, and if so, for how long?
+
+        Args:
+            allow_self_signed (bool): Should we allow the certificate we
+                read to be self signed?
+
+        Returns:
+            int: Days remaining of certificate validity.
+            None: No certificate exists.
+        """
+        if not os.path.exists(self.tls_certificate_file):
+            return None
+
+        try:
+            with open(self.tls_certificate_file, "rb") as f:
+                cert_pem = f.read()
+        except Exception as e:
+            raise ConfigError(
+                "Failed to read existing certificate file %s: %s"
+                % (self.tls_certificate_file, e)
+            )
+
+        try:
+            tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
+        except Exception as e:
+            raise ConfigError(
+                "Failed to parse existing certificate file %s: %s"
+                % (self.tls_certificate_file, e)
+            )
+
+        if not allow_self_signed:
+            if tls_certificate.get_subject() == tls_certificate.get_issuer():
+                raise ValueError(
+                    "TLS Certificate is self signed, and this is not permitted"
+                )
+
+        # YYYYMMDDhhmmssZ -- in UTC
+        expiry_data = tls_certificate.get_notAfter()
+        if expiry_data is None:
+            raise ValueError(
+                "TLS Certificate has no expiry date, and this is not permitted"
+            )
+        expires_on = datetime.strptime(expiry_data.decode("ascii"), "%Y%m%d%H%M%SZ")
+        now = datetime.utcnow()
+        days_remaining = (expires_on - now).days
+        return days_remaining
+
     def read_certificate_from_disk(self):
         """
         Read the certificates and private key from disk.

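The `is_disk_cert_valid` helper present on the dmr/storag side reports whole days of remaining certificate validity, or `None` when no certificate file exists yet. A hedged usage sketch (`tls_config` stands for a loaded `TlsConfig` instance; the 30-day threshold is illustrative):

    days = tls_config.is_disk_cert_valid(allow_self_signed=False)
    if days is None:
        print("no certificate on disk yet")
    elif days < 30:
        print("certificate expires in %d days; renew soon" % days)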
@@ -213,8 +263,8 @@ class TlsConfig(Config):
         #
         #federation_certificate_verification_whitelist:
         # - lon.example.com
-        # - "*.domain.com"
-        # - "*.onion"
+        # - *.domain.com
+        # - *.onion

         # List of custom certificate authorities for federation traffic.
         #

@@ -53,8 +53,8 @@ class UserDirectoryConfig(Config):
         # indexes were (re)built was before Synapse 1.44, you'll have to
         # rebuild the indexes in order to search through all known users.
         # These indexes are built the first time Synapse starts; admins can
-        # manually trigger a rebuild via API following the instructions at
-        # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
+        # manually trigger a rebuild following the instructions at
+        # https://matrix-org.github.io/synapse/latest/user_directory.html
         #
         # Uncomment to return search results containing all known users, even if that
         # user does not share a room with the requester.

@@ -1,4 +1,5 @@
-# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017, 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -119,6 +120,16 @@ class VerifyJsonRequest:
             key_ids=key_ids,
         )

+    def to_fetch_key_request(self) -> "_FetchKeyRequest":
+        """Create a key fetch request for all keys needed to satisfy the
+        verification request.
+        """
+        return _FetchKeyRequest(
+            server_name=self.server_name,
+            minimum_valid_until_ts=self.minimum_valid_until_ts,
+            key_ids=self.key_ids,
+        )
+

 class KeyLookupError(ValueError):
     pass

@@ -168,22 +179,8 @@ class Keyring:
             clock=hs.get_clock(),
             process_batch_callback=self._inner_fetch_key_requests,
         )
-
-        self._hostname = hs.hostname
-
-        # build a FetchKeyResult for each of our own keys, to shortcircuit the
-        # fetcher.
-        self._local_verify_keys: Dict[str, FetchKeyResult] = {}
-        for key_id, key in hs.config.key.old_signing_keys.items():
-            self._local_verify_keys[key_id] = FetchKeyResult(
-                verify_key=key, valid_until_ts=key.expired_ts
-            )
-
-        vk = get_verify_key(hs.signing_key)
-        self._local_verify_keys[f"{vk.alg}:{vk.version}"] = FetchKeyResult(
-            verify_key=vk,
-            valid_until_ts=2 ** 63,  # fake future timestamp
-        )
+        self.verify_key = get_verify_key(hs.signing_key)
+        self.hostname = hs.hostname

     async def verify_json_for_server(
         self,

@@ -270,32 +267,22 @@ class Keyring:
                     Codes.UNAUTHORIZED,
                 )

-        found_keys: Dict[str, FetchKeyResult] = {}
-
-        # If we are the originating server, short-circuit the key-fetch for any keys
-        # we already have
-        if verify_request.server_name == self._hostname:
-            for key_id in verify_request.key_ids:
-                if key_id in self._local_verify_keys:
-                    found_keys[key_id] = self._local_verify_keys[key_id]
-
-        key_ids_to_find = set(verify_request.key_ids) - found_keys.keys()
-        if key_ids_to_find:
-            # Add the keys we need to verify to the queue for retrieval. We queue
-            # up requests for the same server so we don't end up with many in flight
-            # requests for the same keys.
-            key_request = _FetchKeyRequest(
-                server_name=verify_request.server_name,
-                minimum_valid_until_ts=verify_request.minimum_valid_until_ts,
-                key_ids=list(key_ids_to_find),
-            )
-            found_keys_by_server = await self._server_queue.add_to_queue(
-                key_request, key=verify_request.server_name
-            )
-
-            # Since we batch up requests the returned set of keys may contain keys
-            # from other servers, so we pull out only the ones we care about.
-            found_keys.update(found_keys_by_server.get(verify_request.server_name, {}))
+        # If we are the originating server don't fetch verify key for self over federation
+        if verify_request.server_name == self.hostname:
+            await self._process_json(self.verify_key, verify_request)
+            return
+
+        # Add the keys we need to verify to the queue for retrieval. We queue
+        # up requests for the same server so we don't end up with many in flight
+        # requests for the same keys.
+        key_request = verify_request.to_fetch_key_request()
+        found_keys_by_server = await self._server_queue.add_to_queue(
+            key_request, key=verify_request.server_name
+        )
+
+        # Since we batch up requests the returned set of keys may contain keys
+        # from other servers, so we pull out only the ones we care about.s
+        found_keys = found_keys_by_server.get(verify_request.server_name, {})

         # Verify each signature we got valid keys for, raising if we can't
         # verify any of them.

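The two sides short-circuit verification of our own signatures differently: dmr/storag checks the single current signing key and returns early, while the base side pre-fills `found_keys` from `_local_verify_keys` (which also covers `old_signing_keys`) and only queues a fetch for whatever is still missing. A compact sketch equivalent to the base side's lookup loop:

    found_keys = {
        key_id: self._local_verify_keys[key_id]
        for key_id in verify_request.key_ids
        if key_id in self._local_verify_keys
    }
    key_ids_to_find = set(verify_request.key_ids) - found_keys.keys()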
@@ -40,8 +40,6 @@ from typing import TYPE_CHECKING, Optional, Tuple

 from signedjson.sign import sign_json

-from twisted.internet.defer import Deferred
-
 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import JsonDict, get_domain_from_id

@@ -168,7 +166,7 @@ class GroupAttestionRenewer:

         return {}

-    def _start_renew_attestations(self) -> "Deferred[None]":
+    def _start_renew_attestations(self) -> None:
         return run_as_background_process("renew_attestations", self._renew_attestations)

     async def _renew_attestations(self) -> None:

@@ -790,10 +790,10 @@ class AuthHandler:
             (
                 new_refresh_token,
                 new_refresh_token_id,
-            ) = await self.create_refresh_token_for_user_id(
+            ) = await self.get_refresh_token_for_user_id(
                 user_id=existing_token.user_id, device_id=existing_token.device_id
             )
-            access_token = await self.create_access_token_for_user_id(
+            access_token = await self.get_access_token_for_user_id(
                 user_id=existing_token.user_id,
                 device_id=existing_token.device_id,
                 valid_until_ms=valid_until_ms,

@@ -832,7 +832,7 @@ class AuthHandler:

         return True

-    async def create_refresh_token_for_user_id(
+    async def get_refresh_token_for_user_id(
         self,
         user_id: str,
         device_id: str,

@@ -855,7 +855,7 @@ class AuthHandler:
         )
         return refresh_token, refresh_token_id

-    async def create_access_token_for_user_id(
+    async def get_access_token_for_user_id(
         self,
         user_id: str,
         device_id: Optional[str],

@@ -1828,6 +1828,13 @@ def load_single_legacy_password_auth_provider(
         logger.error("Error while initializing %r: %s", module, e)
         raise

+    # The known hooks. If a module implements a method who's name appears in this set
+    # we'll want to register it
+    password_auth_provider_methods = {
+        "check_3pid_auth",
+        "on_logged_out",
+    }
+
     # All methods that the module provides should be async, but this wasn't enforced
     # in the old module system, so we wrap them if needed
     def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:

@@ -1912,14 +1919,11 @@ def load_single_legacy_password_auth_provider(

         return run

-    # If the module has these methods implemented, then we pull them out
-    # and register them as hooks.
-    check_3pid_auth_hook: Optional[CHECK_3PID_AUTH_CALLBACK] = async_wrapper(
-        getattr(provider, "check_3pid_auth", None)
-    )
-    on_logged_out_hook: Optional[ON_LOGGED_OUT_CALLBACK] = async_wrapper(
-        getattr(provider, "on_logged_out", None)
-    )
+    # populate hooks with the implemented methods, wrapped with async_wrapper
+    hooks = {
+        hook: async_wrapper(getattr(provider, hook, None))
+        for hook in password_auth_provider_methods
+    }

     supported_login_types = {}
     # call get_supported_login_types and add that to the dict

@@ -1946,11 +1950,7 @@ def load_single_legacy_password_auth_provider(
     # need to use a tuple here for ("password",) not a list since lists aren't hashable
     auth_checkers[(LoginType.PASSWORD, ("password",))] = check_password

-    api.register_password_auth_provider_callbacks(
-        check_3pid_auth=check_3pid_auth_hook,
-        on_logged_out=on_logged_out_hook,
-        auth_checkers=auth_checkers,
-    )
+    api.register_password_auth_provider_callbacks(hooks, auth_checkers=auth_checkers)


 CHECK_3PID_AUTH_CALLBACK = Callable[

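For a provider that implements both legacy callbacks, the dict-comprehension form on the dmr/storag side expands to the same two wrapped hooks that the base side spells out with typed locals; a sketch of what `hooks` evaluates to:

    hooks = {
        "check_3pid_auth": async_wrapper(getattr(provider, "check_3pid_auth", None)),
        "on_logged_out": async_wrapper(getattr(provider, "on_logged_out", None)),
    }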
@@ -464,6 +464,15 @@ class IdentityHandler:
         if next_link:
             params["next_link"] = next_link

+        if self.hs.config.email.using_identity_server_from_trusted_list:
+            # Warn that a deprecated config option is in use
+            logger.warning(
+                'The config option "trust_identity_server_for_password_resets" '
+                'has been replaced by "account_threepid_delegate". '
+                "Please consult the sample config at docs/sample_config.yaml for "
+                "details and update your config file."
+            )
+
         try:
             data = await self.http_client.post_json_get_json(
                 id_server + "/_matrix/identity/api/v1/validate/email/requestToken",

@@ -508,6 +517,15 @@ class IdentityHandler:
         if next_link:
             params["next_link"] = next_link

+        if self.hs.config.email.using_identity_server_from_trusted_list:
+            # Warn that a deprecated config option is in use
+            logger.warning(
+                'The config option "trust_identity_server_for_password_resets" '
+                'has been replaced by "account_threepid_delegate". '
+                "Please consult the sample config at docs/sample_config.yaml for "
+                "details and update your config file."
+            )
+
         try:
             data = await self.http_client.post_json_get_json(
                 id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",

@@ -1001,52 +1001,13 @@ class EventCreationHandler:
         )

         self.validator.validate_new(event, self.config)
-        await self._validate_event_relation(event)
-        logger.debug("Created event %s", event.event_id)
-
-        return event, context
-
-    async def _validate_event_relation(self, event: EventBase) -> None:
-        """
-        Ensure the relation data on a new event is not bogus.
-
-        Args:
-            event: The event being created.
-
-        Raises:
-            SynapseError if the event is invalid.
-        """
-        relation = event.content.get("m.relates_to")
-        if not relation:
-            return
-
-        relation_type = relation.get("rel_type")
-        if not relation_type:
-            return
-
-        # Ensure the parent is real.
-        relates_to = relation.get("event_id")
-        if not relates_to:
-            return
-
-        parent_event = await self.store.get_event(relates_to, allow_none=True)
-        if parent_event:
-            # And in the same room.
-            if parent_event.room_id != event.room_id:
-                raise SynapseError(400, "Relations must be in the same room")
-
-        else:
-            # There must be some reason that the client knows the event exists,
-            # see if there are existing relations. If so, assume everything is fine.
-            if not await self.store.event_is_target_of_relation(relates_to):
-                # Otherwise, the client can't know about the parent event!
-                raise SynapseError(400, "Can't send relation to unknown event")

         # If this event is an annotation then we check that that the sender
         # can't annotate the same way twice (e.g. stops users from liking an
         # event multiple times).
-        if relation_type == RelationTypes.ANNOTATION:
+        relation = event.content.get("m.relates_to", {})
+        if relation.get("rel_type") == RelationTypes.ANNOTATION:
+            relates_to = relation["event_id"]
             aggregation_key = relation["key"]

             already_exists = await self.store.has_user_annotated_event(

@@ -1055,12 +1016,9 @@ class EventCreationHandler:
             if already_exists:
                 raise SynapseError(400, "Can't send same reaction twice")

-            # Don't attempt to start a thread if the parent event is a relation.
-            elif relation_type == RelationTypes.THREAD:
-                if await self.store.event_includes_relation(relates_to):
-                    raise SynapseError(
-                        400, "Cannot start threads from an event with a relation"
-                    )
+        logger.debug("Created event %s", event.event_id)
+
+        return event, context

     @measure_func("handle_new_client_event")
     async def handle_new_client_event(

@@ -813,13 +813,13 @@ class RegistrationHandler:
             (
                 refresh_token,
                 refresh_token_id,
-            ) = await self._auth_handler.create_refresh_token_for_user_id(
+            ) = await self._auth_handler.get_refresh_token_for_user_id(
                 user_id,
                 device_id=registered_device_id,
             )
             valid_until_ms = self.clock.time_msec() + self.access_token_lifetime

-        access_token = await self._auth_handler.create_access_token_for_user_id(
+        access_token = await self._auth_handler.get_access_token_for_user_id(
             user_id,
             device_id=registered_device_id,
             valid_until_ms=valid_until_ms,

@@ -775,11 +775,8 @@ class RoomCreationHandler:
             raise SynapseError(403, "Room visibility value not allowed.")

         if is_public:
-            room_aliases = []
-            if room_alias:
-                room_aliases.append(room_alias.to_string())
             if not self.config.roomdirectory.is_publishing_room_allowed(
-                user_id, room_id, room_aliases
+                user_id, room_id, room_alias
             ):
                 # Let's just return a generic message, as there may be all sorts of
                 # reasons why we said no. TODO: Allow configurable error messages

@@ -221,7 +221,6 @@ class RoomBatchHandler:
                 action=membership,
                 content=event_dict["content"],
                 outlier=True,
-                historical=True,
                 prev_event_ids=[prev_event_id_for_state_chain],
                 # Make sure to use a copy of this list because we modify it
                 # later in the loop here. Otherwise it will be the same

@@ -241,7 +240,6 @@ class RoomBatchHandler:
                 ),
                 event_dict,
                 outlier=True,
-                historical=True,
                 prev_event_ids=[prev_event_id_for_state_chain],
                 # Make sure to use a copy of this list because we modify it
                 # later in the loop here. Otherwise it will be the same

@@ -268,7 +268,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         content: Optional[dict] = None,
         require_consent: bool = True,
         outlier: bool = False,
-        historical: bool = False,
     ) -> Tuple[str, int]:
         """
         Internal membership update function to get an existing event or create

@@ -294,9 +293,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             outlier: Indicates whether the event is an `outlier`, i.e. if
                 it's from an arbitrary point and floating in the DAG as
                 opposed to being inline with the current DAG.
-            historical: Indicates whether the message is being inserted
-                back in time around some existing events. This is used to skip
-                a few checks and mark the event as backfilled.

         Returns:
             Tuple of event ID and stream ordering position

@@ -341,7 +337,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             auth_event_ids=auth_event_ids,
             require_consent=require_consent,
             outlier=outlier,
-            historical=historical,
         )

         prev_state_ids = await context.get_prev_state_ids()

@@ -438,7 +433,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         new_room: bool = False,
         require_consent: bool = True,
         outlier: bool = False,
-        historical: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
     ) -> Tuple[str, int]:

@@ -460,9 +454,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             outlier: Indicates whether the event is an `outlier`, i.e. if
                 it's from an arbitrary point and floating in the DAG as
                 opposed to being inline with the current DAG.
-            historical: Indicates whether the message is being inserted
-                back in time around some existing events. This is used to skip
-                a few checks and mark the event as backfilled.
             prev_event_ids: The event IDs to use as the prev events
             auth_event_ids:
                 The event ids to use as the auth_events for the new event.

@@ -496,7 +487,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             new_room=new_room,
             require_consent=require_consent,
             outlier=outlier,
-            historical=historical,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
         )

@@ -517,7 +507,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         new_room: bool = False,
         require_consent: bool = True,
         outlier: bool = False,
-        historical: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
     ) -> Tuple[str, int]:

@@ -541,9 +530,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             outlier: Indicates whether the event is an `outlier`, i.e. if
                 it's from an arbitrary point and floating in the DAG as
                 opposed to being inline with the current DAG.
-            historical: Indicates whether the message is being inserted
-                back in time around some existing events. This is used to skip
-                a few checks and mark the event as backfilled.
             prev_event_ids: The event IDs to use as the prev events
             auth_event_ids:
                 The event ids to use as the auth_events for the new event.

@@ -671,7 +657,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 content=content,
                 require_consent=require_consent,
                 outlier=outlier,
-                historical=historical,
             )

             latest_event_ids = await self.store.get_prev_events_for_room(room_id)

@@ -97,7 +97,7 @@ class RoomSummaryHandler:
         # If a user tries to fetch the same page multiple times in quick succession,
         # only process the first attempt and return its result to subsequent requests.
         self._pagination_response_cache: ResponseCache[
-            Tuple[str, str, bool, Optional[int], Optional[int], Optional[str]]
+            Tuple[str, bool, Optional[int], Optional[int], Optional[str]]
         ] = ResponseCache(
             hs.get_clock(),
             "get_room_hierarchy",

@@ -282,14 +282,7 @@ class RoomSummaryHandler:
         # This is due to the pagination process mutating internal state, attempting
         # to process multiple requests for the same page will result in errors.
         return await self._pagination_response_cache.wrap(
-            (
-                requester,
-                requested_room_id,
-                suggested_only,
-                max_depth,
-                limit,
-                from_token,
-            ),
+            (requested_room_id, suggested_only, max_depth, limit, from_token),
             self._get_room_hierarchy,
             requester,
             requested_room_id,

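The cache-key change matters for correctness: with `requester` in the `ResponseCache` key (base side), identical hierarchy requests from different users are cached separately; without it (dmr/storag side), concurrent requests for the same page share one cached response regardless of who asked. A sketch of the two keys:

    # base side
    key = (requester, requested_room_id, suggested_only, max_depth, limit, from_token)
    # dmr/storag side
    key = (requested_room_id, suggested_only, max_depth, limit, from_token)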
@@ -90,7 +90,7 @@ class FollowerTypingHandler:
         self.wheel_timer = WheelTimer(bucket_size=5000)

     @wrap_as_background_process("typing._handle_timeouts")
-    async def _handle_timeouts(self) -> None:
+    def _handle_timeouts(self) -> None:
         logger.debug("Checking for typing timeouts")

         now = self.clock.time_msec()

@@ -20,25 +20,10 @@ import os
 import platform
 import threading
 import time
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Generic,
-    Iterable,
-    Mapping,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    Type,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import Callable, Dict, Iterable, Mapping, Optional, Tuple, Union

 import attr
-from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric
+from prometheus_client import Counter, Gauge, Histogram
 from prometheus_client.core import (
     REGISTRY,
     CounterMetricFamily,

@@ -47,7 +32,6 @@ from prometheus_client.core import (
 )

 from twisted.internet import reactor
-from twisted.internet.base import ReactorBase
 from twisted.python.threadpool import ThreadPool

 import synapse

@@ -70,7 +54,7 @@ HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")

 class RegistryProxy:
     @staticmethod
-    def collect() -> Iterable[Metric]:
+    def collect():
         for metric in REGISTRY.collect():
             if not metric.name.startswith("__"):
                 yield metric

@@ -90,7 +74,7 @@ class LaterGauge:
         ]
     )

-    def collect(self) -> Iterable[Metric]:
+    def collect(self):

         g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)

@@ -109,10 +93,10 @@ class LaterGauge:

         yield g

-    def __attrs_post_init__(self) -> None:
+    def __attrs_post_init__(self):
         self._register()

-    def _register(self) -> None:
+    def _register(self):
         if self.name in all_gauges.keys():
             logger.warning("%s already registered, reregistering" % (self.name,))
             REGISTRY.unregister(all_gauges.pop(self.name))

@@ -121,12 +105,7 @@ class LaterGauge:
         all_gauges[self.name] = self


-# `MetricsEntry` only makes sense when it is a `Protocol`,
-# but `Protocol` can't be used as a `TypeVar` bound.
-MetricsEntry = TypeVar("MetricsEntry")
-
-
-class InFlightGauge(Generic[MetricsEntry]):
+class InFlightGauge:
     """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
     at any given time.

@@ -136,19 +115,14 @@ class InFlightGauge(Generic[MetricsEntry]):
     callbacks.

     Args:
-        name
-        desc
-        labels
-        sub_metrics: A list of sub metrics that the callbacks will update.
+        name (str)
+        desc (str)
+        labels (list[str])
+        sub_metrics (list[str]): A list of sub metrics that the callbacks
+            will update.
     """

-    def __init__(
-        self,
-        name: str,
-        desc: str,
-        labels: Sequence[str],
-        sub_metrics: Sequence[str],
-    ):
+    def __init__(self, name, desc, labels, sub_metrics):
         self.name = name
         self.desc = desc
         self.labels = labels

@@ -156,25 +130,19 @@ class InFlightGauge(Generic[MetricsEntry]):

         # Create a class which have the sub_metrics values as attributes, which
         # default to 0 on initialization. Used to pass to registered callbacks.
-        self._metrics_class: Type[MetricsEntry] = attr.make_class(
+        self._metrics_class = attr.make_class(
             "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True
         )

         # Counts number of in flight blocks for a given set of label values
-        self._registrations: Dict[
-            Tuple[str, ...], Set[Callable[[MetricsEntry], None]]
-        ] = {}
+        self._registrations: Dict = {}

         # Protects access to _registrations
         self._lock = threading.Lock()

         self._register_with_collector()

-    def register(
-        self,
-        key: Tuple[str, ...],
-        callback: Callable[[MetricsEntry], None],
-    ) -> None:
+    def register(self, key, callback):
         """Registers that we've entered a new block with labels `key`.

         `callback` gets called each time the metrics are collected. The same

@@ -190,17 +158,13 @@ class InFlightGauge(Generic[MetricsEntry]):
         with self._lock:
             self._registrations.setdefault(key, set()).add(callback)

-    def unregister(
-        self,
-        key: Tuple[str, ...],
-        callback: Callable[[MetricsEntry], None],
-    ) -> None:
+    def unregister(self, key, callback):
         """Registers that we've exited a block with labels `key`."""

         with self._lock:
             self._registrations.setdefault(key, set()).discard(callback)

-    def collect(self) -> Iterable[Metric]:
+    def collect(self):
         """Called by prometheus client when it reads metrics.

         Note: may be called by a separate thread.

@@ -236,7 +200,7 @@ class InFlightGauge(Generic[MetricsEntry]):
             gauge.add_metric(key, getattr(metrics, name))
         yield gauge

-    def _register_with_collector(self) -> None:
+    def _register_with_collector(self):
         if self.name in all_gauges.keys():
             logger.warning("%s already registered, reregistering" % (self.name,))
             REGISTRY.unregister(all_gauges.pop(self.name))

@@ -266,7 +230,7 @@ class GaugeBucketCollector:
         name: str,
         documentation: str,
         buckets: Iterable[float],
-        registry: CollectorRegistry = REGISTRY,
+        registry=REGISTRY,
     ):
         """
         Args:

@@ -293,12 +257,12 @@ class GaugeBucketCollector:

         registry.register(self)

-    def collect(self) -> Iterable[Metric]:
+    def collect(self):
         # Don't report metrics unless we've already collected some data
         if self._metric is not None:
             yield self._metric

-    def update_data(self, values: Iterable[float]) -> None:
+    def update_data(self, values: Iterable[float]):
         """Update the data to be reported by the metric

         The existing data is cleared, and each measurement in the input is assigned

@@ -340,7 +304,7 @@ class GaugeBucketCollector:


 class CPUMetrics:
-    def __init__(self) -> None:
+    def __init__(self):
         ticks_per_sec = 100
         try:
             # Try and get the system config

@@ -350,7 +314,7 @@ class CPUMetrics:

         self.ticks_per_sec = ticks_per_sec

-    def collect(self) -> Iterable[Metric]:
+    def collect(self):
         if not HAVE_PROC_SELF_STAT:
             return

@@ -400,7 +364,7 @@ gc_time = Histogram(


 class GCCounts:
-    def collect(self) -> Iterable[Metric]:
+    def collect(self):
         cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
         for n, m in enumerate(gc.get_count()):
             cm.add_metric([str(n)], m)

@@ -418,7 +382,7 @@ if not running_on_pypy:


 class PyPyGCStats:
-    def collect(self) -> Iterable[Metric]:
+    def collect(self):

         # @stats is a pretty-printer object with __str__() returning a nice table,
         # plus some fields that contain data from that table.

@@ -601,7 +565,7 @@ def register_threadpool(name: str, threadpool: ThreadPool) -> None:


 class ReactorLastSeenMetric:
-    def collect(self) -> Iterable[Metric]:
+    def collect(self):
         cm = GaugeMetricFamily(
             "python_twisted_reactor_last_seen",
             "Seconds since the Twisted reactor was last seen",

@@ -620,12 +584,9 @@ MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0)
 _last_gc = [0.0, 0.0, 0.0]


-F = TypeVar("F", bound=Callable[..., Any])
-
-
-def runUntilCurrentTimer(reactor: ReactorBase, func: F) -> F:
+def runUntilCurrentTimer(reactor, func):
     @functools.wraps(func)
-    def f(*args: Any, **kwargs: Any) -> Any:
+    def f(*args, **kwargs):
         now = reactor.seconds()
         num_pending = 0

@@ -688,7 +649,7 @@ def runUntilCurrentTimer(reactor: ReactorBase, func: F) -> F:

         return ret

-    return cast(F, f)
+    return f


 try:

@@ -716,5 +677,5 @@ __all__ = [
     "start_http_server",
     "LaterGauge",
     "InFlightGauge",
-    "GaugeBucketCollector",
+    "BucketCollector",
 ]

Some files were not shown because too many files have changed in this diff.