Compare commits
65 Commits
| SHA1 |
|---|
| 2a78554196 |
| b194d47d7c |
| 4821daf312 |
| 9abbd08e33 |
| 2d91b6256e |
| 6408372234 |
| 0f9adc99ad |
| 09eff1b3db |
| ef7fe09778 |
| 57501d9194 |
| 62db603fa0 |
| 0930e9ae12 |
| b0d775ad59 |
| 4d6af01a54 |
| 2c61a318cc |
| ee2cee5f52 |
| 106d99b8cd |
| 78d5896d19 |
| 9b016a0fb4 |
| 522489fbcd |
| df95d3aec2 |
| 0dd0c40329 |
| 5e0e683541 |
| a6c318735d |
| f3efa0036b |
| 0170774b19 |
| d85bc9a4a7 |
| 3ab55d43bd |
| cc33d9eee2 |
| a5d2ea3d08 |
| 73743b8ad1 |
| e8f24b6c35 |
| 7d70582eb0 |
| 37b845dabc |
| e09be0c87a |
| 5573133348 |
| 6a67f3786a |
| 013e0f9cae |
| daf498e099 |
| efd0074ab7 |
| e2f0b49b3f |
| 1609ccf8fe |
| 50d8601581 |
| b3698f945c |
| 35d6b914eb |
| 404444260a |
| 317e9e415c |
| b59f3281d5 |
| b3e9b00fb2 |
| 1f9d0b8a7a |
| cdd308845b |
| 732bbf6737 |
| 2a2b189130 |
| 8711e15734 |
| 988de0afb0 |
| 5dcacdf6d1 |
| 9abc5f2a05 |
| 84f5d83257 |
| 8eaffe013c |
| 77ea03086c |
| 36224e056a |
| a18c568516 |
| b8b905c4ea |
| 6b18eb4430 |
| 406f7bfa17 |
Executable
+57
@@ -0,0 +1,57 @@
#!/usr/bin/env bash

# Test for the export-data admin command against sqlite and postgres

set -xe
cd `dirname $0`/../..

echo "--- Install dependencies"

# Install dependencies for this test.
pip install psycopg2

# Install Synapse itself. This won't update any libraries.
pip install -e .

echo "--- Generate the signing key"

# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml

echo "--- Prepare test database"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates

# Run the export-data command on the sqlite test database
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data

# Test that the output directory exists and contains the rooms directory
dir="/tmp/export_data/rooms"
if [ -d "$dir" ]; then
    echo "Command successful, this test passes"
else
    echo "No output directories found, the command failed against the sqlite database."
    exit 1
fi

# Create the PostgreSQL database.
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"

# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml

# Run the export-data command on the postgres database
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
--output-directory /tmp/export_data2

# Test that the output directory exists and contains the rooms directory
dir2="/tmp/export_data2/rooms"
if [ -d "$dir2" ]; then
    echo "Command successful, this test passes"
else
    echo "No output directories found, the command failed against the postgres database."
    exit 1
fi
@@ -122,6 +122,8 @@ jobs:
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
      - name: Dump logs
        # Logs are most useful when the command fails, so always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -146,6 +148,8 @@ jobs:
        env:
          TRIAL_FLAGS: "--jobs=2"
      - name: Dump logs
        # Logs are most useful when the command fails, so always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -176,6 +180,8 @@ jobs:
        env:
          TRIAL_FLAGS: "--jobs=2"
      - name: Dump logs
        # Logs are most useful when the command fails, so always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -247,6 +253,35 @@ jobs:
            /logs/results.tap
            /logs/**/*.log*

  export-data:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: [linting-done, portdb]
    runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}

    services:
      postgres:
        image: postgres
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: "postgres"
          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: actions/setup-python@v2
        with:
          python-version: "3.9"
      - run: .ci/scripts/test_export_data_command.sh

  portdb:
    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
    needs: linting-done
@@ -33,6 +33,8 @@ jobs:
          TRIAL_FLAGS: "--jobs=2"

      - name: Dump logs
        # Logs are most useful when the command fails, so always include them.
        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -1,3 +1,12 @@
Synapse 1.45.1 (2021-10-20)
===========================

Bugfixes
--------

- Revert change to counting of deactivated users towards the monthly active users limit, introduced in 1.45.0rc1. ([\#11127](https://github.com/matrix-org/synapse/issues/11127))


Synapse 1.45.0 (2021-10-19)
===========================

@@ -8,6 +8,7 @@ include demo/demo.tls.dh
include demo/*.py
include demo/*.sh

+include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
@@ -0,0 +1 @@
Port the Password Auth Providers module interface to the new generic interface.
@@ -0,0 +1 @@
Add type hints to `synapse.storage.databases.main.client_ips`.
@@ -0,0 +1 @@
Resolve and share `state_groups` for all [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical events in batch.
@@ -0,0 +1 @@
Fix spurious warnings about losing the logging context on the `ReplicationCommandHandler` when losing the replication connection.
@@ -0,0 +1 @@
Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state.
@@ -0,0 +1 @@
Include rejected status when we log events.
@@ -0,0 +1 @@
Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state.
@@ -0,0 +1 @@
Add some extra logging to the event persistence code.
@@ -0,0 +1 @@
Add support for Ubuntu 21.10 "Impish Indri".
@@ -0,0 +1 @@
Fix 500 error on `/messages` when the server accumulates more than 5 backwards extremities at a given depth for a room.
@@ -0,0 +1 @@
Rearrange the internal workings of the incremental user directory updates.
@@ -0,0 +1 @@
Simplify the user admin API tests.
@@ -0,0 +1 @@
Fix a bug where setting a user's external_id via the admin API returns 500 and deletes the user's existing external mappings if that external ID is already mapped.
@@ -0,0 +1 @@
Mark the Synapse package as containing type annotations and fix export declarations so that Synapse pluggable modules may be type checked against Synapse.
@@ -0,0 +1 @@
Improve type hints for the `_wrap_in_base_path` decorator used by `MediaFilePaths`.
@@ -0,0 +1 @@
Remove dead code from `MediaFilePaths`.
@@ -0,0 +1 @@
Add tests for the `MediaFilePaths` class.
@@ -0,0 +1 @@
Be more lenient when parsing oEmbed response versions.
@@ -0,0 +1 @@
Add type hints to `synapse.events`.
@@ -0,0 +1 @@
Always dump logs from unit tests during CI runs.
@@ -0,0 +1 @@
Fix broken links relating to module API deprecation in the upgrade notes.
@@ -0,0 +1 @@
Create a separate module for the retention configuration.
@@ -0,0 +1 @@
Add a test for the workaround introduced in [\#11042](https://github.com/matrix-org/synapse/pull/11042) concerning the behaviour of third-party rule modules and `SynapseError`s.
@@ -0,0 +1 @@
Fix a long-standing bug where users excluded from the user directory were added into the directory if they belonged to a room which became public or private.
@@ -0,0 +1 @@
Fix type hints in the relations tests.
@@ -0,0 +1 @@
Fix a long-standing bug when attempting to preview URLs which are in the `windows-1252` character encoding.
@@ -0,0 +1 @@
Fix the broken export-data admin command and add a test script checking the command to CI.
@@ -0,0 +1 @@
Fix a long-standing bug when attempting to preview URLs which are in the `windows-1252` character encoding.
@@ -0,0 +1 @@
Clarify that the sample log config can be copied from the documentation without issue.
@@ -0,0 +1 @@
Update the admin API documentation with an updated list of the characters allowed in registration tokens.
@@ -0,0 +1 @@
Document Synapse's behaviour when dealing with multiple modules registering the same callbacks and/or handlers for the same HTTP endpoints.
@@ -0,0 +1 @@
Show an error when a timestamp in seconds is provided to the `/purge_media_cache` Admin API.
@@ -0,0 +1 @@
Fix local users who left all their rooms being removed from the user directory, even if the "search_all_users" config option was enabled.
@@ -0,0 +1 @@
Add missing type hints to the `synapse.api` module.
@@ -0,0 +1 @@
Clean up some of the federation event authentication code for clarity.
@@ -0,0 +1 @@
Clean up some of the federation event authentication code for clarity.
@@ -0,0 +1 @@
Fix instances of `[example]{.title-ref}` in the upgrade documentation left over from the earlier RST-to-Markdown conversion.
@@ -0,0 +1 @@
Identity server connections no longer ignore `ip_range_whitelist`.
@@ -0,0 +1 @@
Add type hints for event fetching.
@@ -0,0 +1 @@
Clean up some of the federation event authentication code for clarity.
@@ -0,0 +1 @@
Document the version of Synapse each module callback was introduced in.
@@ -0,0 +1 @@
Partially address occasional bursts of old read receipt and presence data being sent to application services that have opted in to receiving them.
@@ -0,0 +1 @@
Add docstrings and comments to the application service ephemeral event sending code.
@@ -0,0 +1 @@
Update the `sign_json` script to support inline configuration of the signing key.
@@ -0,0 +1 @@
Fix a long-standing bug where users excluded from the directory could still be added to the `users_who_share_private_rooms` table after a regular user joins a private room.
@@ -0,0 +1 @@
Fix a broken link in the docker image README.
@@ -0,0 +1 @@
Add missing type hints to `synapse.crypto`.
Vendored
+6
@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.45.1) stable; urgency=medium

  * New synapse release 1.45.1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 20 Oct 2021 11:58:27 +0100

matrix-synapse-py3 (1.45.0) stable; urgency=medium

  * New synapse release 1.45.0.

+2
-1
@@ -226,4 +226,5 @@ healthcheck:
## Using jemalloc

Jemalloc is embedded in the image and will be used instead of the default allocator.
-You can read about jemalloc by reading the Synapse [README](../README.rst).
+You can read about jemalloc by reading the Synapse
+[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
@@ -43,6 +43,7 @@
  - [Third-party rules callbacks](modules/third_party_rules_callbacks.md)
  - [Presence router callbacks](modules/presence_router_callbacks.md)
  - [Account validity callbacks](modules/account_validity_callbacks.md)
+  - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
  - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
  - [Using `synctl` with Workers](synctl_workers.md)
@@ -257,9 +257,9 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
URL Parameters

* `server_name`: string - The name of your local server (e.g. `matrix.org`).
-* `before_ts`: string representing a positive integer - Unix timestamp in ms.
+* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
  Files that were last used before this timestamp will be deleted. It is the timestamp of
-  last access and not the timestamp creation.
+  last access, not the timestamp when the file was created.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
  Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
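For illustration (editor-added, not part of this changeset), the endpoint above can be called with `before_ts` in milliseconds as follows; the server URL and admin token are placeholders, and bearer-token authentication is an assumption based on how Synapse admin APIs are normally accessed:

```python
# Editor's sketch: call the delete-media endpoint with `before_ts` in
# milliseconds, as the corrected documentation above specifies.
import time

import requests  # third-party HTTP client

server = "https://synapse.example.com"  # placeholder homeserver URL
admin_token = "YOUR_ADMIN_ACCESS_TOKEN"  # placeholder admin access token

# Delete media last accessed more than 30 days ago.
before_ts_ms = int((time.time() - 30 * 24 * 3600) * 1000)
resp = requests.post(
    f"{server}/_synapse/admin/v1/media/example.com/delete",
    params={"before_ts": str(before_ts_ms)},
    headers={"Authorization": f"Bearer {admin_token}"},
)
print(resp.json())
```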
@@ -302,7 +302,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>

URL Parameters

-* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
+* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
  All cached media that was last accessed before this timestamp will be removed.

Response:

@@ -9,6 +9,8 @@ The available account validity callbacks are:

### `is_user_expired`

+_First introduced in Synapse v1.39.0_
+
```python
async def is_user_expired(user: str) -> Optional[bool]
```
@@ -22,8 +24,15 @@ If the module returns `True`, the current request will be denied with the error
`ORG_MATRIX_EXPIRED_ACCOUNT` and the HTTP status code 403. Note that this doesn't
invalidate the user's access token.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `None`, Synapse falls through to the next one. The value of the first
+callback that does not return `None` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `on_user_registration`

+_First introduced in Synapse v1.39.0_
+
```python
async def on_user_registration(user: str) -> None
```
@@ -31,3 +40,5 @@ async def on_user_registration(user: str) -> None
Called after successfully registering a user, in case the module needs to perform extra
operations to keep track of them (e.g. add them to a database table). The user is
represented by their Matrix user ID.

+If multiple modules implement this callback, Synapse runs them all in order.
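To make the two callbacks above concrete, here is a minimal module sketch (an editor's illustration, not part of this changeset); it assumes the Module API exposes a `register_account_validity_callbacks` method whose keyword arguments match the callback names documented above.

```python
import time
from typing import Dict, Optional


class MyAccountValidityModule:
    def __init__(self, config: dict, api):
        self.api = api
        # Hypothetical in-memory store of registration times, keyed by user ID.
        self.registered_at: Dict[str, float] = {}

        # Assumed registration call; keyword names mirror the callbacks above.
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
            on_user_registration=self.on_user_registration,
        )

    async def is_user_expired(self, user: str) -> Optional[bool]:
        registered = self.registered_at.get(user)
        if registered is None:
            # No opinion on this user: return None so Synapse falls through
            # to the next module's implementation.
            return None
        # Expire accounts 30 days after registration.
        return (time.time() - registered) > 30 * 24 * 3600

    async def on_user_registration(self, user: str) -> None:
        self.registered_at[user] = time.time()
```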
+26
-7
@@ -2,6 +2,11 @@

Synapse supports extending its functionality by configuring external modules.

+**Note**: When using third-party modules, you effectively allow someone else to run
+custom code on your Synapse homeserver. Server admins are encouraged to verify the
+provenance of the modules they use on their homeserver and make sure the modules aren't
+running malicious code on their instance.
+
## Using modules

To use a module on Synapse, add it to the `modules` section of the configuration file:
@@ -18,17 +23,31 @@ modules:
Each module is defined by a path to a Python class as well as a configuration. This
information for a given module should be available in the module's own documentation.

-**Note**: When using third-party modules, you effectively allow someone else to run
-custom code on your Synapse homeserver. Server admins are encouraged to verify the
-provenance of the modules they use on their homeserver and make sure the modules aren't
-running malicious code on their instance.
+## Using multiple modules

-Also note that we are currently in the process of migrating module interfaces to this
-system. While some interfaces might be compatible with it, others still require
-configuring modules in another part of Synapse's configuration file.
+The order in which modules are listed in this section is important. When processing an
+action that can be handled by several modules, Synapse will always prioritise the module
+that appears first (i.e. is the highest in the list). This means:
+
+* If several modules register the same callback, the callback registered by the module
+  that appears first is used.
+* If several modules try to register a handler for the same HTTP path, only the handler
+  registered by the module that appears first is used. Handlers registered by the other
+  module(s) are ignored and Synapse will log a warning message about them.
+
+Note that Synapse doesn't allow multiple modules implementing authentication checkers via
+the password auth provider feature for the same login type with different fields. If this
+happens, Synapse will refuse to start.
+
+## Current status
+
+We are currently in the process of migrating module interfaces to this system. While some
+interfaces might be compatible with it, others still require configuring modules in
+another part of Synapse's configuration file.

Currently, only the following pre-existing interfaces are compatible with this new system:

* spam checker
* third-party rules
* presence router
+* password auth providers
@@ -0,0 +1,176 @@
# Password auth provider callbacks

Password auth providers offer a way for server administrators to integrate
their Synapse installation with an external authentication system. The callbacks can be
registered by using the Module API's `register_password_auth_provider_callbacks` method.

## Callbacks

### `auth_checkers`

_First introduced in Synapse v1.46.0_

```
auth_checkers: Dict[Tuple[str, Tuple], Callable]
```

A dict mapping from tuples of a login type identifier (such as `m.login.password`) and a
tuple of field names (such as `("password", "secret_thing")`) to authentication checking
callbacks, which should be of the following form:

```python
async def check_auth(
    user: str,
    login_type: str,
    login_dict: "synapse.module_api.JsonDict",
) -> Optional[
    Tuple[
        str,
        Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]]
    ]
]
```

The login type and field names should be provided by the user in the
request to the `/login` API. [The Matrix specification](https://matrix.org/docs/spec/client_server/latest#authentication-types)
defines some types, however user defined ones are also allowed.

The callback is passed the `user` field provided by the client (which might not be in
`@username:server` form), the login type, and a dictionary of login secrets passed by
the client.

If the authentication is successful, the module must return the user's Matrix ID (e.g.
`@alice:example.com`) and optionally a callback to be called with the response to the
`/login` request. If the module doesn't wish to return a callback, it must return `None`
instead.

If the authentication is unsuccessful, the module must return `None`.

If multiple modules register an auth checker for the same login type but with different
fields, Synapse will refuse to start.

If multiple modules register an auth checker for the same login type with the same fields,
then the callbacks will be executed in order, until one returns a Matrix User ID (and
optionally a callback). In that case, the return value of that callback will be accepted
and subsequent callbacks will not be fired. If every callback returns `None`, then the
authentication fails.

### `check_3pid_auth`

_First introduced in Synapse v1.46.0_

```python
async def check_3pid_auth(
    medium: str,
    address: str,
    password: str,
) -> Optional[
    Tuple[
        str,
        Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]]
    ]
]
```

Called when a user attempts to register or log in with a third party identifier,
such as email. It is passed the medium (e.g. `email`), an address (e.g. `jdoe@example.com`)
and the user's password.

If the authentication is successful, the module must return the user's Matrix ID (e.g.
`@alice:example.com`) and optionally a callback to be called with the response to the `/login` request.
If the module doesn't wish to return a callback, it must return `None` instead.

If the authentication is unsuccessful, the module must return `None`.

If multiple modules implement this callback, they will be considered in order. If a
callback returns `None`, Synapse falls through to the next one. The value of the first
callback that does not return `None` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback. If every callback returns `None`,
the authentication is denied.

### `on_logged_out`

_First introduced in Synapse v1.46.0_

```python
async def on_logged_out(
    user_id: str,
    device_id: Optional[str],
    access_token: str
) -> None
```

Called during a logout request for a user. It is passed the qualified user ID, the ID of the
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.

If multiple modules implement this callback, Synapse runs them all in order.
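Before the full example below, here is a smaller editor-added sketch (not part of this changeset) registering the `check_3pid_auth` and `on_logged_out` callbacks; the keyword-argument names passed to `register_password_auth_provider_callbacks` are assumed to match the callback names documented above.

```python
from typing import Any, Optional, Tuple


class My3pidAuthModule:
    def __init__(self, config: dict, api: Any):
        self.api = api
        api.register_password_auth_provider_callbacks(
            check_3pid_auth=self.check_3pid_auth,
            on_logged_out=self.on_logged_out,
        )

    async def check_3pid_auth(
        self, medium: str, address: str, password: str
    ) -> Optional[Tuple[str, None]]:
        # Hypothetical check against a single hard-coded email/password pair.
        if medium == "email" and address == "jdoe@example.com" and password == "hunter2":
            # No post-login callback is needed, so the second element is None.
            return ("@jdoe:example.com", None)
        # Authentication failed (or not our concern): fall through.
        return None

    async def on_logged_out(
        self, user_id: str, device_id: Optional[str], access_token: str
    ) -> None:
        # E.g. invalidate any session state an external system holds.
        print(f"{user_id} logged out of device {device_id}")
```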
## Example

The example module below implements authentication checkers for two different login types:
- `my.login_type`
  - Expects a `my_field` field to be sent to `/login`
  - Is checked by the method: `self.check_my_login`
- `m.login.password` (defined in [the spec](https://matrix.org/docs/spec/client_server/latest#password-based))
  - Expects a `password` field to be sent to `/login`
  - Is checked by the method: `self.check_pass`


```python
from typing import Awaitable, Callable, Optional, Tuple

import synapse
from synapse import module_api


class MyAuthProvider:
    def __init__(self, config: dict, api: module_api):

        self.api = api

        self.credentials = {
            "bob": "building",
            "@scoop:matrix.org": "digging",
        }

        api.register_password_auth_provider_callbacks(
            auth_checkers={
                ("my.login_type", ("my_field",)): self.check_my_login,
                ("m.login.password", ("password",)): self.check_pass,
            },
        )

    async def check_my_login(
        self,
        username: str,
        login_type: str,
        login_dict: "synapse.module_api.JsonDict",
    ) -> Optional[
        Tuple[
            str,
            Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]],
        ]
    ]:
        if login_type != "my.login_type":
            return None

        if self.credentials.get(username) == login_dict.get("my_field"):
            return (self.api.get_qualified_user_id(username), None)

    async def check_pass(
        self,
        username: str,
        login_type: str,
        login_dict: "synapse.module_api.JsonDict",
    ) -> Optional[
        Tuple[
            str,
            Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]],
        ]
    ]:
        if login_type != "m.login.password":
            return None

        if self.credentials.get(username) == login_dict.get("password"):
            return (self.api.get_qualified_user_id(username), None)
```
@@ -12,6 +12,9 @@ should register this resource in its `__init__` method using the `register_web_r
method from the `ModuleApi` class (see [this section](writing_a_module.html#registering-a-web-resource) for
more info).

+There is no longer a `get_db_schema_files` callback provided for password auth provider modules. Any
+changes to the database should now be made by the module using the module API class.
+
The module's author should also update any example in the module's configuration to only
use the new `modules` section in Synapse's configuration file (see [this section](index.html#using-modules)
for more info).
@@ -10,6 +10,8 @@ The available presence router callbacks are:

### `get_users_for_states`

+_First introduced in Synapse v1.42.0_
+
```python
async def get_users_for_states(
    state_updates: Iterable["synapse.api.UserPresenceState"],
@@ -24,8 +26,14 @@ must return a dictionary that maps from Matrix user IDs (which can be local or r

Synapse will then attempt to send the specified presence updates to each user when possible.

+If multiple modules implement this callback, Synapse merges all the dictionaries returned
+by the callbacks. If multiple callbacks return a dictionary containing the same key,
+Synapse concatenates the sets associated with this key from each dictionary.
+
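The merge rule described above can be illustrated with a small self-contained sketch (editor-added, not from this changeset):

```python
# Editor's illustration of the documented merge semantics: dictionaries
# returned by several `get_users_for_states` callbacks are merged, and sets
# under the same key are combined.
from collections import defaultdict
from typing import Dict, Iterable, Set


def merge_presence_results(
    results: Iterable[Dict[str, Set[str]]],
) -> Dict[str, Set[str]]:
    merged: Dict[str, Set[str]] = defaultdict(set)
    for result in results:
        for user_id, updates in result.items():
            merged[user_id] |= updates
    return dict(merged)


# Two modules route (here stringified) presence updates to overlapping users:
module_a = {"@alice:example.com": {"update1"}}
module_b = {"@alice:example.com": {"update2"}, "@bob:example.com": {"update3"}}
print(merge_presence_results([module_a, module_b]))
# e.g. {'@alice:example.com': {'update1', 'update2'}, '@bob:example.com': {'update3'}}
```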
### `get_interested_users`

+_First introduced in Synapse v1.42.0_
+
```python
async def get_interested_users(
    user_id: str
@@ -44,6 +52,12 @@ query. The returned users can be local or remote.
Alternatively the callback can return `synapse.module_api.PRESENCE_ALL_USERS`
to indicate that the user should receive updates from all known users.

+If multiple modules implement this callback, they will be considered in order. Synapse
+calls each callback one by one, and uses a concatenation of all the `set`s returned by the
+callbacks. If one callback returns `synapse.module_api.PRESENCE_ALL_USERS`, Synapse uses
+this value instead. If this happens, Synapse does not call any of the subsequent
+implementations of this callback.
+
## Example

The example below is a module that implements both presence router callbacks, and ensures
@@ -10,6 +10,8 @@ The available spam checker callbacks are:

### `check_event_for_spam`

+_First introduced in Synapse v1.37.0_
+
```python
async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
```
@@ -19,8 +21,15 @@ either a `bool` to indicate whether the event must be rejected because of spam,
to indicate the event must be rejected because of spam and to give a rejection reason to
forward to clients.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
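A minimal sketch of the fall-through behaviour described above (editor-added, not part of this changeset); it assumes the Module API's `register_spam_checker_callbacks` method accepts the callback under its documented name:

```python
from typing import Union


class MySpamChecker:
    def __init__(self, config: dict, api):
        self.api = api
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam,
        )

    async def check_event_for_spam(self, event) -> Union[bool, str]:
        # `event` is assumed to behave like the documented EventBase,
        # whose `content` is a dict-like mapping.
        body = event.content.get("body", "")
        if "buy cheap spam" in body.lower():
            # Returning a string rejects the event and forwards this reason.
            return "This event looks like spam"
        # `False` means "not spam as far as this module is concerned";
        # Synapse falls through to the next registered callback.
        return False
```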
### `user_may_join_room`

+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
```
@@ -34,8 +43,15 @@ currently has a pending invite in the room.
This callback isn't called if the join is performed by a server administrator, or in the
context of a room creation.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_invite`

+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
```
@@ -44,8 +60,15 @@ Called when processing an invitation. The module must return a `bool` indicating
the inviter can invite the invitee to the given room. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_send_3pid_invite`

+_First introduced in Synapse v1.45.0_
+
```python
async def user_may_send_3pid_invite(
    inviter: str,
@@ -79,8 +102,15 @@ await user_may_send_3pid_invite(
**Note**: If the third-party identifier is already associated with a matrix user ID,
[`user_may_invite`](#user_may_invite) will be used instead.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_create_room`

+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_create_room(user: str) -> bool
```
@@ -88,8 +118,15 @@ async def user_may_create_room(user: str) -> bool
Called when processing a room creation request. The module must return a `bool` indicating
whether the given user (represented by their Matrix user ID) is allowed to create a room.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_create_room_with_invites`

+_First introduced in Synapse v1.44.0_
+
```python
async def user_may_create_room_with_invites(
    user: str,
@@ -117,8 +154,15 @@ corresponding list(s) will be empty.
since no invites are sent when cloning a room. To cover this case, modules also need to
implement `user_may_create_room`.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_create_room_alias`

+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
```
@@ -127,8 +171,15 @@ Called when trying to associate an alias with an existing room. The module must
`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
to set the given alias.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_publish_room`

+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_publish_room(user: str, room_id: str) -> bool
```
@@ -137,8 +188,15 @@ Called when trying to publish a room to the homeserver's public rooms directory.
module must return a `bool` indicating whether the given user (represented by their
Matrix user ID) is allowed to publish the given room.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `check_username_for_spam`

+_First introduced in Synapse v1.37.0_
+
```python
async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
```
@@ -154,8 +212,15 @@ is represented as a dictionary with the following keys:
The module is given a copy of the original dictionary, so modifying it from within the
module cannot modify a user's profile when included in user directory search results.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `check_registration_for_spam`

+_First introduced in Synapse v1.37.0_
+
```python
async def check_registration_for_spam(
    email_threepid: Optional[dict],
@@ -179,8 +244,16 @@ The arguments passed to this callback are:
  used during the registration process.
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `RegistrationBehaviour.ALLOW`, Synapse falls through to the next one.
+The value of the first callback that does not return `RegistrationBehaviour.ALLOW` will
+be used. If this happens, Synapse will not call any of the subsequent implementations of
+this callback.
+
### `check_media_file_for_spam`

+_First introduced in Synapse v1.37.0_
+
```python
async def check_media_file_for_spam(
    file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
@@ -191,6 +264,11 @@ async def check_media_file_for_spam(
Called when storing a local or remote file. The module must return a boolean indicating
whether the given file can be stored in the homeserver's media store.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
## Example

The example below is a module that implements the spam checker callback
@@ -10,6 +10,8 @@ The available third party rules callbacks are:

### `check_event_allowed`

+_First introduced in Synapse v1.39.0_
+
```python
async def check_event_allowed(
    event: "synapse.events.EventBase",
@@ -44,8 +46,15 @@ dictionary, and modify the returned dictionary accordingly.
Note that replacing the event only works for events sent by local users, not for events
received over federation.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
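For illustration (editor-added, not part of this changeset), a minimal third-party rules module might register `check_event_allowed` as follows; the full signature and the `(allowed, replacement)` return shape are assumptions based on the surrounding documentation:

```python
from typing import Optional, Tuple


class MyThirdPartyRules:
    def __init__(self, config: dict, api):
        self.api = api
        api.register_third_party_rules_callbacks(
            check_event_allowed=self.check_event_allowed,
        )

    async def check_event_allowed(
        self, event, state_events
    ) -> Tuple[bool, Optional[dict]]:
        if event.type == "m.room.topic":
            # Disallow topic changes; no replacement event is offered.
            return False, None
        # Allow the event unchanged (no replacement dictionary).
        return True, None
```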
### `on_create_room`

+_First introduced in Synapse v1.39.0_
+
```python
async def on_create_room(
    requester: "synapse.types.Requester",
@@ -63,8 +72,16 @@ the request is a server admin.
Modules can modify the `request_content` (by e.g. adding events to its `initial_state`),
or deny the room's creation by raising a `module_api.errors.SynapseError`.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns without raising an exception, Synapse falls through to the next one. The
+room creation will be forbidden as soon as one of the callbacks raises an exception. If
+this happens, Synapse will not call any of the subsequent implementations of this
+callback.
+
### `check_threepid_can_be_invited`

+_First introduced in Synapse v1.39.0_
+
```python
async def check_threepid_can_be_invited(
    medium: str,
@@ -76,8 +93,15 @@ async def check_threepid_can_be_invited(
Called when processing an invite via a third-party identifier (i.e. email or phone number).
The module must return a boolean indicating whether the invite can go through.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `check_visibility_can_be_modified`

+_First introduced in Synapse v1.39.0_
+
```python
async def check_visibility_can_be_modified(
    room_id: str,
@@ -90,6 +114,11 @@ Called when changing the visibility of a room in the local public room directory
visibility is a string that's either "public" or "private". The module must return a
boolean indicating whether the change can go through.

+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
## Example

The example below is a module that implements the third-party rules callback
@@ -12,6 +12,21 @@ configuration associated with the module in Synapse's configuration file.
See the documentation for the `ModuleApi` class
[here](https://github.com/matrix-org/synapse/blob/master/synapse/module_api/__init__.py).

+## When Synapse runs with several modules configured
+
+If Synapse is running with other modules configured, the order each module appears
+within the `modules` section of the Synapse configuration file might restrict what it can
+or cannot register. See [this section](index.html#using-multiple-modules) for more
+information.
+
+On top of the rules listed in the link above, if a callback returns a value that should
+cause the current operation to fail (e.g. if a callback checking an event returns with a
+value that should cause the event to be denied), Synapse will fail the operation and
+ignore any subsequent callbacks that should have been run after this one.
+
+The documentation for each callback mentions how Synapse behaves when
+multiple modules implement it.
+
## Handling the module's configuration

A module can implement the following static method:
@@ -1,3 +1,9 @@
+<h2 style="color:red">
+This page of the Synapse documentation is now deprecated. For up to date
+documentation on setting up or writing a password auth provider module, please see
+<a href="modules.md">this page</a>.
+</h2>
+
# Password auth provider modules

Password auth providers offer a way for server administrators to
+42
-69
@@ -472,6 +472,48 @@ limit_remote_rooms:
#
#user_ips_max_age: 14d

+# Inhibits the /requestToken endpoints from returning an error that might leak
+# information about whether an e-mail address is in use or not on this
+# homeserver.
+# Note that for some endpoints the error situation is the e-mail already being
+# used, and for others the error is entering the e-mail being unused.
+# If this option is enabled, instead of returning an error, these endpoints will
+# act as if no error happened and return a fake session ID ('sid') to clients.
+#
+#request_token_inhibit_3pid_errors: true
+
+# A list of domains that the domain portion of 'next_link' parameters
+# must match.
+#
+# This parameter is optionally provided by clients while requesting
+# validation of an email or phone number, and maps to a link that
+# users will be automatically redirected to after validation
+# succeeds. Clients can make use of this parameter to aid the validation
+# process.
+#
+# The whitelist is applied whether the homeserver or an
+# identity server is handling validation.
+#
+# The default value is no whitelist functionality; all domains are
+# allowed. Setting this value to an empty list will instead disallow
+# all domains.
+#
+#next_link_domain_whitelist: ["matrix.org"]
+
+# Templates to use when generating email or HTML page contents.
+#
+templates:
+  # Directory in which Synapse will try to find template files to use to generate
+  # email or HTML page contents.
+  # If not set, or a file is not found within the template directory, a default
+  # template from within the Synapse package will be used.
+  #
+  # See https://matrix-org.github.io/synapse/latest/templates.html for more
+  # information about using custom templates.
+  #
+  #custom_template_directory: /path/to/custom/templates/


# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
@@ -541,47 +583,6 @@ retention:
# - shortest_max_lifetime: 3d
#   interval: 1d

-# Inhibits the /requestToken endpoints from returning an error that might leak
-# information about whether an e-mail address is in use or not on this
-# homeserver.
-# Note that for some endpoints the error situation is the e-mail already being
-# used, and for others the error is entering the e-mail being unused.
-# If this option is enabled, instead of returning an error, these endpoints will
-# act as if no error happened and return a fake session ID ('sid') to clients.
-#
-#request_token_inhibit_3pid_errors: true
-
-# A list of domains that the domain portion of 'next_link' parameters
-# must match.
-#
-# This parameter is optionally provided by clients while requesting
-# validation of an email or phone number, and maps to a link that
-# users will be automatically redirected to after validation
-# succeeds. Clients can make use of this parameter to aid the validation
-# process.
-#
-# The whitelist is applied whether the homeserver or an
-# identity server is handling validation.
-#
-# The default value is no whitelist functionality; all domains are
-# allowed. Setting this value to an empty list will instead disallow
-# all domains.
-#
-#next_link_domain_whitelist: ["matrix.org"]
-
-# Templates to use when generating email or HTML page contents.
-#
-templates:
-  # Directory in which Synapse will try to find template files to use to generate
-  # email or HTML page contents.
-  # If not set, or a file is not found within the template directory, a default
-  # template from within the Synapse package will be used.
-  #
-  # See https://matrix-org.github.io/synapse/latest/templates.html for more
-  # information about using custom templates.
-  #
-  #custom_template_directory: /path/to/custom/templates/


## TLS ##

@@ -2260,34 +2261,6 @@ email:
#email_validation: "[%(server_name)s] Validate your email"


-# Password providers allow homeserver administrators to integrate
-# their Synapse installation with existing authentication methods
-# ex. LDAP, external tokens, etc.
-#
-# For more information and known implementations, please see
-# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
-#
-# Note: instances wishing to use SAML or CAS authentication should
-# instead use the `saml2_config` or `cas_config` options,
-# respectively.
-#
-password_providers:
-#    # Example config for an LDAP auth provider
-#    - module: "ldap_auth_provider.LdapAuthProvider"
-#      config:
-#        enabled: true
-#        uri: "ldap://ldap.example.com:389"
-#        start_tls: true
-#        base: "ou=users,dc=example,dc=com"
-#        attributes:
-#           uid: "cn"
-#           mail: "email"
-#           name: "givenName"
-#        #bind_dn:
-#        #bind_password:
-#        #filter: "(objectClass=posixAccount)"



## Push ##

+31
-31
@@ -187,8 +187,8 @@ of this endpoint modifying the media store.

The current third-party rules module interface is deprecated in favour of the new generic
modules system introduced in Synapse v1.37.0. Authors of third-party rules modules can refer
-to [this documentation](modules.md#porting-an-existing-module-that-uses-the-old-interface)
-to update their modules. Synapse administrators can refer to [this documentation](modules.md#using-modules)
+to [this documentation](modules/porting_legacy_module.md)
+to update their modules. Synapse administrators can refer to [this documentation](modules/index.md)
to update their configuration once the modules they are using have been updated.

We plan to remove support for the current third-party rules interface in September 2021.
@@ -237,9 +237,9 @@ SQLite databases are unaffected by this change.

The current spam checker interface is deprecated in favour of a new generic modules system.
Authors of spam checker modules can refer to [this
-documentation](modules.md#porting-an-existing-module-that-uses-the-old-interface)
+documentation](modules/porting_legacy_module.md)
to update their modules. Synapse administrators can refer to [this
-documentation](modules.md#using-modules)
+documentation](modules/index.md)
to update their configuration once the modules they are using have been updated.

We plan to remove support for the current spam checker interface in August 2021.
@@ -348,24 +348,24 @@ Please ensure your Application Services are up to date.
## Requirement for X-Forwarded-Proto header

When using Synapse with a reverse proxy (in particular, when using the
-[x_forwarded]{.title-ref} option on an HTTP listener), Synapse now
-expects to receive an [X-Forwarded-Proto]{.title-ref} header on incoming
+`x_forwarded` option on an HTTP listener), Synapse now
+expects to receive an `X-Forwarded-Proto` header on incoming
HTTP requests. If it is not set, Synapse will log a warning on each
received request.

To avoid the warning, administrators using a reverse proxy should ensure
-that the reverse proxy sets [X-Forwarded-Proto]{.title-ref} header to
-[https]{.title-ref} or [http]{.title-ref} to indicate the protocol used
+that the reverse proxy sets `X-Forwarded-Proto` header to
+`https` or `http` to indicate the protocol used
by the client.

-Synapse also requires the [Host]{.title-ref} header to be preserved.
+Synapse also requires the `Host` header to be preserved.

See the [reverse proxy documentation](reverse_proxy.md), where the
example configurations have been updated to show how to set these
headers.

(Users of [Caddy](https://caddyserver.com/) are unaffected, since we
-believe it sets [X-Forwarded-Proto]{.title-ref} by default.)
+believe it sets `X-Forwarded-Proto` by default.)

# Upgrading to v1.27.0

@@ -529,13 +529,13 @@ mapping provider to specify different algorithms, instead of the
way](<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>).

If your Synapse configuration uses a custom mapping provider
-([oidc_config.user_mapping_provider.module]{.title-ref} is specified and
+(`oidc_config.user_mapping_provider.module` is specified and
not equal to
-[synapse.handlers.oidc_handler.JinjaOidcMappingProvider]{.title-ref})
-then you *must* ensure that [map_user_attributes]{.title-ref} of the
+`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`)
+then you *must* ensure that `map_user_attributes` of the
mapping provider performs some normalisation of the
-[localpart]{.title-ref} returned. To match previous behaviour you can
-use the [map_username_to_mxid_localpart]{.title-ref} function provided
+`localpart` returned. To match previous behaviour you can
+use the `map_username_to_mxid_localpart` function provided
by Synapse. An example is shown below:

```python
@@ -564,7 +564,7 @@ v1.24.0. The Admin API is now only accessible under:

- `/_synapse/admin/v1`

-The only exception is the [/admin/whois]{.title-ref} endpoint, which is
+The only exception is the `/admin/whois` endpoint, which is
[also available via the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).

@@ -639,7 +639,7 @@ This page will appear to the user after clicking a password reset link
that has been emailed to them.

To complete password reset, the page must include a way to make a
-[POST]{.title-ref} request to
+`POST` request to
`/_synapse/client/password_reset/{medium}/submit_token` with the query
parameters from the original link, presented as a URL-encoded form. See
the file itself for more details.
@@ -660,18 +660,18 @@ but the parameters are slightly different:

# Upgrading to v1.18.0

-## Docker [-py3]{.title-ref} suffix will be removed in future versions
+## Docker `-py3` suffix will be removed in future versions

From 10th August 2020, we will no longer publish Docker images with the
-[-py3]{.title-ref} tag suffix. The images tagged with the
-[-py3]{.title-ref} suffix have been identical to the non-suffixed tags
+`-py3` tag suffix. The images tagged with the
+`-py3` suffix have been identical to the non-suffixed tags
since release 0.99.0, and the suffix is obsolete.

-On 10th August, we will remove the [latest-py3]{.title-ref} tag.
-Existing per-release tags (such as [v1.18.0-py3]{.title-ref}) will not
-be removed, but no new [-py3]{.title-ref} tags will be added.
+On 10th August, we will remove the `latest-py3` tag.
+Existing per-release tags (such as `v1.18.0-py3`) will not
+be removed, but no new `-py3` tags will be added.

-Scripts relying on the [-py3]{.title-ref} suffix will need to be
+Scripts relying on the `-py3` suffix will need to be
updated.

## Redis replication is now recommended in lieu of TCP replication
@@ -705,8 +705,8 @@ This will *not* be a problem for Synapse installations which were:
If completeness of the room directory is a concern, installations which
are affected can be repaired as follows:

-1. Run the following sql from a [psql]{.title-ref} or
-   [sqlite3]{.title-ref} console:
+1. Run the following sql from a `psql` or
+   `sqlite3` console:

    ```sql
    INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
@@ -770,8 +770,8 @@ participating in many rooms.
   of any problems.

1. As an initial check to see if you will be affected, you can try
-   running the following query from the [psql]{.title-ref} or
-   [sqlite3]{.title-ref} console. It is safe to run it while Synapse is
+   running the following query from the `psql` or
+   `sqlite3` console. It is safe to run it while Synapse is
   still running.

    ```sql
@@ -1353,9 +1353,9 @@ first need to upgrade the database by running:

    python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>

-Where [<db>]{.title-ref} is the location of the database,
-[<server_name>]{.title-ref} is the server name as specified in the
-synapse configuration, and [<signing_key>]{.title-ref} is the location
+Where `<db>` is the location of the database,
+`<server_name>` is the server name as specified in the
+synapse configuration, and `<signing_key>` is the location
of the signing key as specified in the synapse configuration.

This may take some time to complete. Failures of signatures and content
@@ -149,7 +149,7 @@ POST /_synapse/admin/v1/registration_tokens/new

The request body must be a JSON object and can contain the following fields:
- `token`: The registration token. A string of no more than 64 characters that
  consists only of characters matched by the regex `[A-Za-z0-9-_]`.
  consists only of characters matched by the regex `[A-Za-z0-9._~-]`.
  Default: randomly generated.
- `uses_allowed`: The integer number of times the token can be used to complete
  a registration before it becomes invalid.

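The constraints above are easy to check client-side before calling the endpoint. A minimal sketch, assuming `requests` is available and that the admin API is reachable at `base_url` with a server-admin access token (both names are illustrative, not from the diff):

```python
import re
import requests  # illustrative HTTP client; any client works

# 1-64 characters drawn from the documented character class.
TOKEN_RE = re.compile(r"^[A-Za-z0-9._~-]{1,64}$")

def create_registration_token(base_url: str, admin_token: str,
                              token: str, uses_allowed: int) -> dict:
    # Validate against the documented constraints before the round trip.
    if not TOKEN_RE.match(token):
        raise ValueError("token must be 1-64 chars drawn from [A-Za-z0-9._~-]")
    resp = requests.post(
        f"{base_url}/_synapse/admin/v1/registration_tokens/new",
        headers={"Authorization": f"Bearer {admin_token}"},
        json={"token": token, "uses_allowed": uses_allowed},
    )
    resp.raise_for_status()
    return resp.json()
```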
@@ -2,13 +2,13 @@

Below is a sample logging configuration file. This file can be tweaked to control how your
homeserver will output logs. A restart of the server is generally required to apply any
changes made to this file.
changes made to this file. The value of the `log_config` option in your homeserver
config should be the path to this file.

Note that the contents below are *not* intended to be copied and used as the basis for
a real homeserver.yaml. Instead, if you are starting from scratch, please generate
a fresh config using Synapse by following the instructions in
[Installation](../../setup/installation.md).
Note that a default logging configuration (shown below) is created automatically alongside
the homeserver config when following the [installation instructions](../../setup/installation.md).
It should be named `<SERVERNAME>.log.config` by default.

```yaml
{{#include ../../sample_log_config.yaml}}
```

@@ -22,8 +22,11 @@ files =
  synapse/crypto,
  synapse/event_auth.py,
  synapse/events/builder.py,
  synapse/events/presence_router.py,
  synapse/events/snapshot.py,
  synapse/events/spamcheck.py,
  synapse/events/third_party_rules.py,
  synapse/events/utils.py,
  synapse/events/validator.py,
  synapse/federation,
  synapse/groups,
@@ -53,6 +56,7 @@ files =
  synapse/storage/_base.py,
  synapse/storage/background_updates.py,
  synapse/storage/databases/main/appservice.py,
  synapse/storage/databases/main/client_ips.py,
  synapse/storage/databases/main/events.py,
  synapse/storage/databases/main/keys.py,
  synapse/storage/databases/main/pusher.py,
@@ -88,11 +92,23 @@ files =
  tests/handlers/test_user_directory.py,
  tests/rest/client/test_login.py,
  tests/rest/client/test_auth.py,
  tests/rest/client/test_relations.py,
  tests/rest/media/v1/test_filepath.py,
  tests/rest/media/v1/test_oembed.py,
  tests/storage/test_state.py,
  tests/storage/test_user_directory.py,
  tests/util/test_itertools.py,
  tests/util/test_stream_change_cache.py

[mypy-synapse.api.*]
disallow_untyped_defs = True

[mypy-synapse.crypto.*]
disallow_untyped_defs = True

[mypy-synapse.events.*]
disallow_untyped_defs = True

[mypy-synapse.handlers.*]
disallow_untyped_defs = True

@@ -108,6 +124,9 @@ disallow_untyped_defs = True
[mypy-synapse.state.*]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.client_ips]
disallow_untyped_defs = True

[mypy-synapse.storage.util.*]
disallow_untyped_defs = True

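For readers unfamiliar with the flag being switched on above: with `disallow_untyped_defs = True`, mypy rejects any function definition without annotations in the matching modules. A tiny illustration (hypothetical module, not from the diff; the code itself still runs under plain Python):

```python
# Under a [mypy-...] section with disallow_untyped_defs = True,
# mypy flags the first definition and accepts the second.
def scale(x):  # mypy error: Function is missing a type annotation
    return x * 2

def scale_typed(x: int) -> int:  # accepted: fully annotated
    return x * 2
```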
@@ -27,6 +27,7 @@ DISTS = (
    "ubuntu:bionic",  # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:hirsute",  # 21.04 (EOL 2022-01-05)
    "ubuntu:impish",  # 21.10 (EOL 2022-07)
)

DESC = """\
+25 -7
@@ -51,13 +51,19 @@ Example usage:
        "request with.",
    )

    parser.add_argument(
        "-K",
        "--signing-key",
        help="The private ed25519 key to sign the request with.",
    )

    parser.add_argument(
        "-c",
        "--config",
        default="homeserver.yaml",
        help=(
            "Path to synapse config file, from which the server name and/or signing "
            "key path will be read. Ignored if --server-name and --signing-key-path "
            "key path will be read. Ignored if --server-name and --signing-key(-path) "
            "are both given."
        ),
    )
@@ -87,11 +93,14 @@ Example usage:

    args = parser.parse_args()

    if not args.server_name or not args.signing_key_path:
    if not args.server_name or not (args.signing_key_path or args.signing_key):
        read_args_from_config(args)

    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]
    if args.signing_key:
        keys = read_signing_keys([args.signing_key])
    else:
        with open(args.signing_key_path) as f:
            keys = read_signing_keys(f)

    json_to_sign = args.input_data
    if json_to_sign is None:
@@ -107,7 +116,7 @@ Example usage:
        print("Input json was not an object", file=sys.stderr)
        sys.exit(1)

    sign_json(obj, args.server_name, key)
    sign_json(obj, args.server_name, keys[0])
    for c in json_encoder.iterencode(obj):
        args.output.write(c)
    args.output.write("\n")
@@ -118,8 +127,17 @@ def read_args_from_config(args: argparse.Namespace) -> None:
        config = yaml.safe_load(fh)
        if not args.server_name:
            args.server_name = config["server_name"]
        if not args.signing_key_path:
            args.signing_key_path = config["signing_key_path"]
        if not args.signing_key_path and not args.signing_key:
            if "signing_key" in config:
                args.signing_key = config["signing_key"]
            elif "signing_key_path" in config:
                args.signing_key_path = config["signing_key_path"]
            else:
                print(
                    "A signing key must be given on the commandline or in the config file.",
                    file=sys.stderr,
                )
                sys.exit(1)


if __name__ == "__main__":

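The net effect of the hunks above is a four-step fallback for locating the signing key. A condensed sketch of that order, under the assumption of a hypothetical helper name and a pre-loaded `config` dict (`read_signing_keys` is the real `signedjson.key` function used by the script):

```python
from signedjson.key import read_signing_keys

def resolve_signing_keys(args, config: dict):
    """Hypothetical helper mirroring the fallback order the diff implements."""
    if args.signing_key:                      # 1. key given directly on the command line
        return read_signing_keys([args.signing_key])
    if args.signing_key_path:                 # 2. key file named on the command line
        with open(args.signing_key_path) as f:
            return read_signing_keys(f)
    if "signing_key" in config:               # 3. inline key in homeserver.yaml
        return read_signing_keys([config["signing_key"]])
    if "signing_key_path" in config:          # 4. key file named in homeserver.yaml
        with open(config["signing_key_path"]) as f:
            return read_signing_keys(f)
    raise SystemExit("A signing key must be given on the commandline or in the config file.")
```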
+1 -1
@@ -47,7 +47,7 @@ try:
except ImportError:
    pass

__version__ = "1.45.0"
__version__ = "1.45.1"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
+11 -3
@@ -245,7 +245,7 @@ class Auth:

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ):
    ) -> None:
        """Validates that the app service is allowed to control
        the given user.

@@ -618,5 +618,13 @@ class Auth:
            % (user_id, room_id),
        )

    async def check_auth_blocking(self, *args, **kwargs) -> None:
        await self._auth_blocking.check_auth_blocking(*args, **kwargs)
    async def check_auth_blocking(
        self,
        user_id: Optional[str] = None,
        threepid: Optional[dict] = None,
        user_type: Optional[str] = None,
        requester: Optional[Requester] = None,
    ) -> None:
        await self._auth_blocking.check_auth_blocking(
            user_id=user_id, threepid=threepid, user_type=user_type, requester=requester
        )

+23 -46
@@ -18,7 +18,7 @@
import logging
import typing
from http import HTTPStatus
from typing import Dict, List, Optional, Union
from typing import Any, Dict, List, Optional, Union

from twisted.web import http

@@ -143,7 +143,7 @@ class SynapseError(CodeMessageException):
        super().__init__(code, msg)
        self.errcode = errcode

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode)


@@ -175,7 +175,7 @@ class ProxiedRequestError(SynapseError):
        else:
            self._additional_fields = dict(additional_fields)

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, **self._additional_fields)


@@ -196,7 +196,7 @@ class ConsentNotGivenError(SynapseError):
        )
        self._consent_uri = consent_uri

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)


@@ -262,14 +262,10 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError):
    """An error indicating we don't understand the request you're trying to make"""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.UNRECOGNIZED
        if len(args) == 0:
            message = "Unrecognized request"
        else:
            message = args[0]
        super().__init__(400, message, **kwargs)
    def __init__(
        self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
    ):
        super().__init__(400, msg, errcode)


class NotFoundError(SynapseError):
@@ -284,10 +280,8 @@ class AuthError(SynapseError):
    other poorly-defined times.
    """

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.FORBIDDEN
        super().__init__(*args, **kwargs)
    def __init__(self, code: int, msg: str, errcode: str = Codes.FORBIDDEN):
        super().__init__(code, msg, errcode)


class InvalidClientCredentialsError(SynapseError):
@@ -321,7 +315,7 @@ class InvalidClientTokenError(InvalidClientCredentialsError):
        super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
        self._soft_logout = soft_logout

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        d = super().error_dict()
        d["soft_logout"] = self._soft_logout
        return d
@@ -345,7 +339,7 @@ class ResourceLimitError(SynapseError):
        self.limit_type = limit_type
        super().__init__(code, msg, errcode=errcode)

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(
            self.msg,
            self.errcode,
@@ -357,32 +351,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.TOO_LARGE
        super().__init__(413, *args, **kwargs)


class EventStreamError(SynapseError):
    """An error raised when there a problem with the event stream."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.BAD_PAGINATION
        super().__init__(*args, **kwargs)
    def __init__(self, msg: str):
        super().__init__(413, msg, Codes.TOO_LARGE)


class LoginError(SynapseError):
    """An error raised when there was a problem logging in."""

    pass


class StoreError(SynapseError):
    """An error raised when there was a problem storing some data."""

    pass


class InvalidCaptchaError(SynapseError):
    def __init__(
@@ -395,7 +374,7 @@ class InvalidCaptchaError(SynapseError):
        super().__init__(code, msg, errcode)
        self.error_url = error_url

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, error_url=self.error_url)


@@ -412,7 +391,7 @@ class LimitExceededError(SynapseError):
        super().__init__(code, msg, errcode)
        self.retry_after_ms = retry_after_ms

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)


@@ -443,10 +422,8 @@ class UnsupportedRoomVersionError(SynapseError):
class ThreepidValidationError(SynapseError):
    """An error raised when there was a problem authorising an event."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.FORBIDDEN
        super().__init__(*args, **kwargs)
    def __init__(self, msg: str, errcode: str = Codes.FORBIDDEN):
        super().__init__(400, msg, errcode)


class IncompatibleRoomVersionError(SynapseError):
@@ -466,7 +443,7 @@ class IncompatibleRoomVersionError(SynapseError):

        self._room_version = room_version

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, room_version=self._room_version)


@@ -494,7 +471,7 @@ class RequestSendFailed(RuntimeError):
    errors (like programming errors).
    """

    def __init__(self, inner_exception, can_retry):
    def __init__(self, inner_exception: BaseException, can_retry: bool):
        super().__init__(
            "Failed to send request: %s: %s"
            % (type(inner_exception).__name__, inner_exception)
@@ -503,7 +480,7 @@ class RequestSendFailed(RuntimeError):
        self.can_retry = can_retry


def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs):
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
    """Utility method for constructing an error response for client-server
    interactions.

@@ -551,7 +528,7 @@ class FederationError(RuntimeError):
        msg = "%s %s: %s" % (level, code, reason)
        super().__init__(msg)

    def get_dict(self):
    def get_dict(self) -> "JsonDict":
        return {
            "level": self.level,
            "code": self.code,
@@ -580,7 +557,7 @@ class HttpResponseException(CodeMessageException):
        super().__init__(code, msg)
        self.response = response

    def to_synapse_error(self):
    def to_synapse_error(self) -> SynapseError:
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to

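A brief aside on why the constructors above move from `*args, **kwargs` to explicit signatures: with catch-all arguments a misspelled keyword is silently swallowed, while an explicit signature fails immediately. An illustrative toy (not Synapse code):

```python
class OldStyle:
    def __init__(self, msg, **kwargs):
        # A typo like errocde= just lands in kwargs unnoticed.
        self.errcode = kwargs.get("errcode", "M_UNKNOWN")

class NewStyle:
    def __init__(self, msg: str, errcode: str = "M_UNKNOWN"):
        self.msg = msg
        self.errcode = errcode

print(OldStyle("boom", errocde="M_FORBIDDEN").errcode)  # 'M_UNKNOWN' -- typo ignored
try:
    NewStyle("boom", errocde="M_FORBIDDEN")
except TypeError as e:
    print(e)  # unexpected keyword argument 'errocde' -- caught at the call site
```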
@@ -231,24 +231,24 @@ class FilterCollection:
    def include_redundant_members(self) -> bool:
        return self._room_state_filter.include_redundant_members()

    def filter_presence(self, events):
    def filter_presence(
        self, events: Iterable[UserPresenceState]
    ) -> List[UserPresenceState]:
        return self._presence_filter.filter(events)

    def filter_account_data(self, events):
    def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
        return self._account_data.filter(events)

    def filter_room_state(self, events):
    def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]:
        return self._room_state_filter.filter(self._room_filter.filter(events))

    def filter_room_timeline(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
    def filter_room_timeline(self, events: Iterable[EventBase]) -> List[EventBase]:
        return self._room_timeline_filter.filter(self._room_filter.filter(events))

    def filter_room_ephemeral(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
    def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDict]:
        return self._room_ephemeral_filter.filter(self._room_filter.filter(events))

    def filter_room_account_data(
        self, events: Iterable[FilterEvent]
    ) -> List[FilterEvent]:
    def filter_room_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
        return self._room_account_data.filter(self._room_filter.filter(events))

    def blocks_all_presence(self) -> bool:
@@ -309,7 +309,7 @@ class Filter:
        # except for presence which actually gets passed around as its own
        # namedtuple type.
        if isinstance(event, UserPresenceState):
            sender = event.user_id
            sender: Optional[str] = event.user_id
            room_id = None
            ev_type = "m.presence"
            contains_url = False

+25 -26
@@ -12,49 +12,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import namedtuple
from typing import Any, Optional

import attr

from synapse.api.constants import PresenceState
from synapse.types import JsonDict


class UserPresenceState(
    namedtuple(
        "UserPresenceState",
        (
            "user_id",
            "state",
            "last_active_ts",
            "last_federation_update_ts",
            "last_user_sync_ts",
            "status_msg",
            "currently_active",
        ),
    )
):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class UserPresenceState:
    """Represents the current presence state of the user.

    user_id (str)
    last_active (int): Time in msec that the user last interacted with server.
    last_federation_update (int): Time in msec since either a) we sent a presence
    user_id
    last_active: Time in msec that the user last interacted with server.
    last_federation_update: Time in msec since either a) we sent a presence
        update to other servers or b) we received a presence update, depending
        on whether it is a local user or not.
    last_user_sync (int): Time in msec that the user last *completed* a sync
    last_user_sync: Time in msec that the user last *completed* a sync
        (or event stream).
    status_msg (str): User set status message.
    status_msg: User set status message.
    """

    def as_dict(self):
        return dict(self._asdict())
    user_id: str
    state: str
    last_active_ts: int
    last_federation_update_ts: int
    last_user_sync_ts: int
    status_msg: Optional[str]
    currently_active: bool

    def as_dict(self) -> JsonDict:
        return attr.asdict(self)

    @staticmethod
    def from_dict(d):
    def from_dict(d: JsonDict) -> "UserPresenceState":
        return UserPresenceState(**d)

    def copy_and_replace(self, **kwargs):
        return self._replace(**kwargs)
    def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState":
        return attr.evolve(self, **kwargs)

    @classmethod
    def default(cls, user_id):
    def default(cls, user_id: str) -> "UserPresenceState":
        """Returns a default presence state."""
        return cls(
            user_id=user_id,

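The same namedtuple-to-attrs migration in miniature, showing the method swaps the hunk above relies on (`_replace` becomes `attr.evolve`, `dict(nt._asdict())` becomes `attr.asdict`; the `Point` class is purely illustrative):

```python
import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class Point:
    x: int
    y: int

p = Point(1, 2)
p2 = attr.evolve(p, y=5)                    # namedtuple equivalent: p._replace(y=5)
assert attr.asdict(p2) == {"x": 1, "y": 5}  # equivalent: dict(p2._asdict())
```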
@@ -161,7 +161,7 @@ class Ratelimiter:

        return allowed, time_allowed

    def _prune_message_counts(self, time_now_s: float):
    def _prune_message_counts(self, time_now_s: float) -> None:
        """Remove message count entries that have not exceeded their defined
        rate_hz limit

@@ -190,7 +190,7 @@ class Ratelimiter:
        update: bool = True,
        n_actions: int = 1,
        _time_now_s: Optional[float] = None,
    ):
    ) -> None:
        """Checks if an action can be performed. If not, raises a LimitExceededError

        Checks if the user has ratelimiting disabled in the database by looking

+5 -8
@@ -19,6 +19,7 @@ from hashlib import sha256
from urllib.parse import urlencode

from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig

SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client"
CLIENT_API_PREFIX = "/_matrix/client"
@@ -34,11 +35,7 @@ LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"


class ConsentURIBuilder:
    def __init__(self, hs_config):
        """
        Args:
            hs_config (synapse.config.homeserver.HomeServerConfig):
        """
    def __init__(self, hs_config: HomeServerConfig):
        if hs_config.key.form_secret is None:
            raise ConfigError("form_secret not set in config")
        if hs_config.server.public_baseurl is None:
@@ -47,15 +44,15 @@ class ConsentURIBuilder:
        self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
        self._public_baseurl = hs_config.server.public_baseurl

    def build_user_consent_uri(self, user_id):
    def build_user_consent_uri(self, user_id: str) -> str:
        """Build a URI which we can give to the user to do their privacy
        policy consent

        Args:
            user_id (str): mxid or username of user
            user_id: mxid or username of user

        Returns
            (str) the URI where the user can do consent
            The URI where the user can do consent
        """
        mac = hmac.new(
            key=self._hmac_secret, msg=user_id.encode("ascii"), digestmod=sha256
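Putting the pieces of `build_user_consent_uri` together: the builder HMACs the user id with `form_secret` and appends the user and the mac as query parameters. A standalone sketch under those assumptions (the exact path and parameter names are assumptions for illustration, not confirmed by the hunk):

```python
import hmac
from hashlib import sha256
from urllib.parse import urlencode

def build_user_consent_uri(form_secret: str, public_baseurl: str, user_id: str) -> str:
    # HMAC the user id with form_secret, as the constructor above prepares.
    mac = hmac.new(
        key=form_secret.encode("utf-8"),
        msg=user_id.encode("ascii"),
        digestmod=sha256,
    ).hexdigest()
    # Path and query parameter names are illustrative assumptions.
    return f"{public_baseurl}_matrix/consent?{urlencode({'u': user_id, 'h': mac})}"
```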
@@ -42,6 +42,7 @@ from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
@@ -379,6 +380,7 @@ async def start(hs: "HomeServer"):
    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)
    load_legacy_password_auth_providers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

@@ -39,6 +39,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.server import HomeServer
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string

@@ -58,6 +59,7 @@ class AdminCmdSlavedStore(
    SlavedEventStore,
    SlavedClientIpStore,
    BaseSlavedStore,
    RoomWorkerStore,
):
    pass

@@ -185,11 +187,7 @@ def start(config_options):
    # a full worker config.
    config.worker.worker_app = "synapse.app.admin_cmd"

    if (
        not config.worker.worker_daemonize
        and not config.worker.worker_log_file
        and not config.worker.worker_log_config
    ):
    if not config.worker.worker_daemonize and not config.worker.worker_log_config:
        # Since we're meant to be run as a "command" let's not redirect stdio
        # unless we've actually set log config.
        config.logging.no_redirect_stdio = True
@@ -198,9 +196,9 @@ def start(config_options):
    config.server.update_user_directory = False
    config.worker.run_background_tasks = False
    config.worker.start_pushers = False
    config.pusher_shard_config.instances = []
    config.worker.pusher_shard_config.instances = []
    config.worker.send_federation = False
    config.federation_shard_config.instances = []
    config.worker.federation_shard_config.instances = []

    synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts

@@ -221,7 +219,7 @@ def start(config_options):

    async def run():
        with LoggingContext("command"):
            _base.start(ss)
            await _base.start(ss)
            await args.func(ss, args)

    _base.start_worker_reactor(

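The `await _base.start(ss)` change above fixes a classic async bug: calling a coroutine function without awaiting it creates a coroutine object but never runs its body. A minimal demonstration (plain asyncio, purely for illustration):

```python
import asyncio

async def start() -> None:
    print("startup ran")

async def run() -> None:
    start()        # bug: nothing printed; Python warns "coroutine ... was never awaited"
    await start()  # fix: the body actually executes

asyncio.run(run())
```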
@@ -26,6 +26,7 @@ from synapse.config import (
    redis,
    registration,
    repository,
    retention,
    room_directory,
    saml2,
    server,
@@ -91,6 +92,7 @@ class RootConfig:
    modules: modules.ModulesConfig
    caches: cache.CacheConfig
    federation: federation.FederationConfig
    retention: retention.RetentionConfig

    config_classes: List = ...
    def __init__(self) -> None: ...

@@ -24,6 +24,9 @@ class ExperimentalConfig(Config):
    def read_config(self, config: JsonDict, **kwargs):
        experimental = config.get("experimental_features") or {}

        # Whether to enable experimental MSC1849 (aka relations) support
        self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)

        # MSC3026 (busy presence state)
        self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)

@@ -38,6 +38,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
from .retention import RetentionConfig
from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
@@ -59,6 +60,7 @@ class HomeServerConfig(RootConfig):
    config_classes = [
        ModulesConfig,
        ServerConfig,
        RetentionConfig,
        TlsConfig,
        FederationConfig,
        CacheConfig,

@@ -25,6 +25,29 @@ class PasswordAuthProviderConfig(Config):
    section = "authproviders"

    def read_config(self, config, **kwargs):
        """Parses the old password auth providers config. The config format looks like this:

        password_providers:
           # Example config for an LDAP auth provider
           - module: "ldap_auth_provider.LdapAuthProvider"
             config:
               enabled: true
               uri: "ldap://ldap.example.com:389"
               start_tls: true
               base: "ou=users,dc=example,dc=com"
               attributes:
                  uid: "cn"
                  mail: "email"
                  name: "givenName"
               #bind_dn:
               #bind_password:
               #filter: "(objectClass=posixAccount)"

        We expect admins to use modules for this feature (which is why it doesn't appear
        in the sample config file), but we want to keep support for it around for a bit
        for backwards compatibility.
        """

        self.password_providers: List[Tuple[Type, Any]] = []
        providers = []

@@ -49,33 +72,3 @@ class PasswordAuthProviderConfig(Config):
        )

        self.password_providers.append((provider_class, provider_config))

    def generate_config_section(self, **kwargs):
        return """\
        # Password providers allow homeserver administrators to integrate
        # their Synapse installation with existing authentication methods
        # ex. LDAP, external tokens, etc.
        #
        # For more information and known implementations, please see
        # https://matrix-org.github.io/synapse/latest/password_auth_providers.html
        #
        # Note: instances wishing to use SAML or CAS authentication should
        # instead use the `saml2_config` or `cas_config` options,
        # respectively.
        #
        password_providers:
        #    # Example config for an LDAP auth provider
        #    - module: "ldap_auth_provider.LdapAuthProvider"
        #      config:
        #        enabled: true
        #        uri: "ldap://ldap.example.com:389"
        #        start_tls: true
        #        base: "ou=users,dc=example,dc=com"
        #        attributes:
        #           uid: "cn"
        #           mail: "email"
        #           name: "givenName"
        #        #bind_dn:
        #        #bind_password:
        #        #filter: "(objectClass=posixAccount)"
        """

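For context on what consuming the `module:` entries above involves, here is a hedged sketch of resolving a legacy provider path to a class and its parsed config (the helper name and the `parse_config` convention are assumptions based on the legacy provider interface, not code from this diff):

```python
import importlib

def load_legacy_provider(module_path: str, provider_config: dict):
    """Hypothetical helper: resolve 'pkg.mod.ClassName' to a class plus config."""
    module_name, clsname = module_path.rsplit(".", 1)
    provider_class = getattr(importlib.import_module(module_name), clsname)
    # Legacy providers conventionally expose parse_config() to validate raw config.
    if hasattr(provider_class, "parse_config"):
        provider_config = provider_class.parse_config(provider_config)
    return provider_class, provider_config
```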
@@ -0,0 +1,226 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import List, Optional

import attr

from synapse.config._base import Config, ConfigError

logger = logging.getLogger(__name__)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RetentionPurgeJob:
    """Object describing the configuration of a retention purge job"""

    interval: int
    shortest_max_lifetime: Optional[int]
    longest_max_lifetime: Optional[int]


class RetentionConfig(Config):
    section = "retention"

    def read_config(self, config, **kwargs):
        retention_config = config.get("retention")
        if retention_config is None:
            retention_config = {}

        self.retention_enabled = retention_config.get("enabled", False)

        retention_default_policy = retention_config.get("default_policy")

        if retention_default_policy is not None:
            self.retention_default_min_lifetime = retention_default_policy.get(
                "min_lifetime"
            )
            if self.retention_default_min_lifetime is not None:
                self.retention_default_min_lifetime = self.parse_duration(
                    self.retention_default_min_lifetime
                )

            self.retention_default_max_lifetime = retention_default_policy.get(
                "max_lifetime"
            )
            if self.retention_default_max_lifetime is not None:
                self.retention_default_max_lifetime = self.parse_duration(
                    self.retention_default_max_lifetime
                )

            if (
                self.retention_default_min_lifetime is not None
                and self.retention_default_max_lifetime is not None
                and (
                    self.retention_default_min_lifetime
                    > self.retention_default_max_lifetime
                )
            ):
                raise ConfigError(
                    "The default retention policy's 'min_lifetime' can not be greater"
                    " than its 'max_lifetime'"
                )
        else:
            self.retention_default_min_lifetime = None
            self.retention_default_max_lifetime = None

        if self.retention_enabled:
            logger.info(
                "Message retention policies support enabled with the following default"
                " policy: min_lifetime = %s ; max_lifetime = %s",
                self.retention_default_min_lifetime,
                self.retention_default_max_lifetime,
            )

        self.retention_allowed_lifetime_min = retention_config.get(
            "allowed_lifetime_min"
        )
        if self.retention_allowed_lifetime_min is not None:
            self.retention_allowed_lifetime_min = self.parse_duration(
                self.retention_allowed_lifetime_min
            )

        self.retention_allowed_lifetime_max = retention_config.get(
            "allowed_lifetime_max"
        )
        if self.retention_allowed_lifetime_max is not None:
            self.retention_allowed_lifetime_max = self.parse_duration(
                self.retention_allowed_lifetime_max
            )

        if (
            self.retention_allowed_lifetime_min is not None
            and self.retention_allowed_lifetime_max is not None
            and self.retention_allowed_lifetime_min
            > self.retention_allowed_lifetime_max
        ):
            raise ConfigError(
                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
                " greater than 'allowed_lifetime_max'"
            )

        self.retention_purge_jobs: List[RetentionPurgeJob] = []
        for purge_job_config in retention_config.get("purge_jobs", []):
            interval_config = purge_job_config.get("interval")

            if interval_config is None:
                raise ConfigError(
                    "A retention policy's purge jobs configuration must have the"
                    " 'interval' key set."
                )

            interval = self.parse_duration(interval_config)

            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")

            if shortest_max_lifetime is not None:
                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)

            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")

            if longest_max_lifetime is not None:
                longest_max_lifetime = self.parse_duration(longest_max_lifetime)

            if (
                shortest_max_lifetime is not None
                and longest_max_lifetime is not None
                and shortest_max_lifetime > longest_max_lifetime
            ):
                raise ConfigError(
                    "A retention policy's purge jobs configuration's"
                    " 'shortest_max_lifetime' value can not be greater than its"
                    " 'longest_max_lifetime' value."
                )

            self.retention_purge_jobs.append(
                RetentionPurgeJob(interval, shortest_max_lifetime, longest_max_lifetime)
            )

        if not self.retention_purge_jobs:
            self.retention_purge_jobs = [
                RetentionPurgeJob(self.parse_duration("1d"), None, None)
            ]

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
        # Message retention policy at the server level.
        #
        # Room admins and mods can define a retention period for their rooms using the
        # 'm.room.retention' state event, and server admins can cap this period by setting
        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
        #
        # If this feature is enabled, Synapse will regularly look for and purge events
        # which are older than the room's maximum retention period. Synapse will also
        # filter events received over federation so that events that should have been
        # purged are ignored and not stored again.
        #
        retention:
          # The message retention policies feature is disabled by default. Uncomment the
          # following line to enable it.
          #
          #enabled: true

          # Default retention policy. If set, Synapse will apply it to rooms that lack the
          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
          # matter much because Synapse doesn't take it into account yet.
          #
          #default_policy:
          #  min_lifetime: 1d
          #  max_lifetime: 1y

          # Retention policy limits. If set, and the state of a room contains a
          # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
          # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
          # to these limits when running purge jobs.
          #
          #allowed_lifetime_min: 1d
          #allowed_lifetime_max: 1y

          # Server admins can define the settings of the background jobs purging the
          # events whose lifetime has expired under the 'purge_jobs' section.
          #
          # If no configuration is provided, a single job will be set up to delete expired
          # events in every room daily.
          #
          # Each job's configuration defines which range of message lifetimes the job
          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
          # lower than or equal to 3 days. Both the minimum and the maximum value of a
          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
          # whose 'max_lifetime' is lower than or equal to three days.
          #
          # The rationale for this per-job configuration is that some rooms might have a
          # retention policy with a low 'max_lifetime', where history needs to be purged
          # of outdated messages on a more frequent basis than for the rest of the rooms
          # (e.g. every 12h), without that purge being performed by a job that's
          # iterating over every room it knows, which could be heavy on the server.
          #
          # If any purge job is configured, it is strongly recommended to have at least
          # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
          # set, or one job without 'shortest_max_lifetime' and one job without
          # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
          # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
          # room's policy to these values is done after the policies are retrieved from
          # Synapse's database (which is done using the range specified in a purge job's
          # configuration).
          #
          #purge_jobs:
          #  - longest_max_lifetime: 3d
          #    interval: 12h
          #  - shortest_max_lifetime: 3d
          #    interval: 1d
        """
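The range rule spelled out in the comments above (strictly higher than `shortest_max_lifetime`, lower than or equal to `longest_max_lifetime`, either bound optional) fits in a few lines. A sketch with an illustrative helper name, working in milliseconds:

```python
from typing import Optional

def job_handles_room(
    shortest_max_lifetime: Optional[int],
    longest_max_lifetime: Optional[int],
    room_max_lifetime: int,
) -> bool:
    # A job covers rooms whose max_lifetime is strictly greater than the lower
    # bound and at most the upper bound; a missing bound means unbounded.
    if shortest_max_lifetime is not None and room_max_lifetime <= shortest_max_lifetime:
        return False
    if longest_max_lifetime is not None and room_max_lifetime > longest_max_lifetime:
        return False
    return True

# e.g. the sample job with shortest_max_lifetime=2d, longest_max_lifetime=3d:
DAY_MS = 24 * 60 * 60 * 1000
assert job_handles_room(2 * DAY_MS, 3 * DAY_MS, 3 * DAY_MS)      # 3d is covered
assert not job_handles_room(2 * DAY_MS, 3 * DAY_MS, 2 * DAY_MS)  # exactly 2d is not
```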
@@ -225,15 +225,6 @@ class ManholeConfig:
    pub_key: Optional[Key]


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RetentionConfig:
    """Object describing the configuration of the manhole"""

    interval: int
    shortest_max_lifetime: Optional[int]
    longest_max_lifetime: Optional[int]


@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
    enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@@ -376,11 +367,6 @@ class ServerConfig(Config):
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get("block_non_admin_invites", False)

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", True
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
@@ -466,124 +452,6 @@ class ServerConfig(Config):
        # events with profile information that differ from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

        retention_config = config.get("retention")
        if retention_config is None:
            retention_config = {}

        self.retention_enabled = retention_config.get("enabled", False)

        retention_default_policy = retention_config.get("default_policy")

        if retention_default_policy is not None:
            self.retention_default_min_lifetime = retention_default_policy.get(
                "min_lifetime"
            )
            if self.retention_default_min_lifetime is not None:
                self.retention_default_min_lifetime = self.parse_duration(
                    self.retention_default_min_lifetime
                )

            self.retention_default_max_lifetime = retention_default_policy.get(
                "max_lifetime"
            )
            if self.retention_default_max_lifetime is not None:
                self.retention_default_max_lifetime = self.parse_duration(
                    self.retention_default_max_lifetime
                )

            if (
                self.retention_default_min_lifetime is not None
                and self.retention_default_max_lifetime is not None
                and (
                    self.retention_default_min_lifetime
                    > self.retention_default_max_lifetime
                )
            ):
                raise ConfigError(
                    "The default retention policy's 'min_lifetime' can not be greater"
                    " than its 'max_lifetime'"
                )
        else:
            self.retention_default_min_lifetime = None
            self.retention_default_max_lifetime = None

        if self.retention_enabled:
            logger.info(
                "Message retention policies support enabled with the following default"
                " policy: min_lifetime = %s ; max_lifetime = %s",
                self.retention_default_min_lifetime,
                self.retention_default_max_lifetime,
            )

        self.retention_allowed_lifetime_min = retention_config.get(
            "allowed_lifetime_min"
        )
        if self.retention_allowed_lifetime_min is not None:
            self.retention_allowed_lifetime_min = self.parse_duration(
                self.retention_allowed_lifetime_min
            )

        self.retention_allowed_lifetime_max = retention_config.get(
            "allowed_lifetime_max"
        )
        if self.retention_allowed_lifetime_max is not None:
            self.retention_allowed_lifetime_max = self.parse_duration(
                self.retention_allowed_lifetime_max
            )

        if (
            self.retention_allowed_lifetime_min is not None
            and self.retention_allowed_lifetime_max is not None
            and self.retention_allowed_lifetime_min
            > self.retention_allowed_lifetime_max
        ):
            raise ConfigError(
                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
                " greater than 'allowed_lifetime_max'"
            )

        self.retention_purge_jobs: List[RetentionConfig] = []
        for purge_job_config in retention_config.get("purge_jobs", []):
            interval_config = purge_job_config.get("interval")

            if interval_config is None:
                raise ConfigError(
                    "A retention policy's purge jobs configuration must have the"
                    " 'interval' key set."
                )

            interval = self.parse_duration(interval_config)

            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")

            if shortest_max_lifetime is not None:
                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)

            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")

            if longest_max_lifetime is not None:
                longest_max_lifetime = self.parse_duration(longest_max_lifetime)

            if (
                shortest_max_lifetime is not None
                and longest_max_lifetime is not None
                and shortest_max_lifetime > longest_max_lifetime
            ):
                raise ConfigError(
                    "A retention policy's purge jobs configuration's"
                    " 'shortest_max_lifetime' value can not be greater than its"
                    " 'longest_max_lifetime' value."
                )

            self.retention_purge_jobs.append(
                RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
            )

        if not self.retention_purge_jobs:
            self.retention_purge_jobs = [
                RetentionConfig(self.parse_duration("1d"), None, None)
            ]

        self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]

        # no_tls is not really supported any more, but let's grandfather it in
@@ -1255,75 +1123,6 @@ class ServerConfig(Config):
        #
        #user_ips_max_age: 14d

        # Message retention policy at the server level.
        #
        # Room admins and mods can define a retention period for their rooms using the
        # 'm.room.retention' state event, and server admins can cap this period by setting
        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
        #
        # If this feature is enabled, Synapse will regularly look for and purge events
        # which are older than the room's maximum retention period. Synapse will also
        # filter events received over federation so that events that should have been
        # purged are ignored and not stored again.
        #
        retention:
          # The message retention policies feature is disabled by default. Uncomment the
          # following line to enable it.
          #
          #enabled: true

          # Default retention policy. If set, Synapse will apply it to rooms that lack the
          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
          # matter much because Synapse doesn't take it into account yet.
          #
          #default_policy:
          #  min_lifetime: 1d
          #  max_lifetime: 1y

          # Retention policy limits. If set, and the state of a room contains a
          # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
          # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
          # to these limits when running purge jobs.
          #
          #allowed_lifetime_min: 1d
          #allowed_lifetime_max: 1y

          # Server admins can define the settings of the background jobs purging the
          # events which lifetime has expired under the 'purge_jobs' section.
          #
          # If no configuration is provided, a single job will be set up to delete expired
          # events in every room daily.
          #
          # Each job's configuration defines which range of message lifetimes the job
          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
          # lower than or equal to 3 days. Both the minimum and the maximum value of a
          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
          # which 'max_lifetime' is lower than or equal to three days.
          #
          # The rationale for this per-job configuration is that some rooms might have a
          # retention policy with a low 'max_lifetime', where history needs to be purged
          # of outdated messages on a more frequent basis than for the rest of the rooms
          # (e.g. every 12h), but not want that purge to be performed by a job that's
          # iterating over every room it knows, which could be heavy on the server.
          #
          # If any purge job is configured, it is strongly recommended to have at least
          # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
          # set, or one job without 'shortest_max_lifetime' and one job without
          # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
          # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
          # room's policy to these values is done after the policies are retrieved from
          # Synapse's database (which is done using the range specified in a purge job's
          # configuration).
          #
          #purge_jobs:
          #  - longest_max_lifetime: 3d
          #    interval: 12h
          #  - shortest_max_lifetime: 3d
          #    interval: 1d

        # Inhibits the /requestToken endpoints from returning an error that might leak
        # information about whether an e-mail address is in use or not on this
        # homeserver.

@@ -29,9 +29,12 @@ from twisted.internet.ssl import (
    TLSVersion,
    platformTrust,
)
from twisted.protocols.tls import TLSMemoryBIOProtocol
from twisted.python.failure import Failure
from twisted.web.iweb import IPolicyForHTTPS

from synapse.config.homeserver import HomeServerConfig

logger = logging.getLogger(__name__)


@@ -51,7 +54,7 @@ class ServerContextFactory(ContextFactory):
    per https://github.com/matrix-org/synapse/issues/1691
    """

    def __init__(self, config):
    def __init__(self, config: HomeServerConfig):
        # TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version,
        # switch to those (see https://github.com/pyca/cryptography/issues/5379).
        #
@@ -64,7 +67,7 @@ class ServerContextFactory(ContextFactory):
        self.configure_context(self._context, config)

    @staticmethod
    def configure_context(context, config):
    def configure_context(context: SSL.Context, config: HomeServerConfig) -> None:
        try:
            _ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
            context.set_tmp_ecdh(_ecCurve)
@@ -75,14 +78,15 @@ class ServerContextFactory(ContextFactory):
            SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
        )
        context.use_certificate_chain_file(config.tls.tls_certificate_file)
        assert config.tls.tls_private_key is not None
        context.use_privatekey(config.tls.tls_private_key)

        # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
        context.set_cipher_list(
            "ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
            b"ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
        )

    def getContext(self):
    def getContext(self) -> SSL.Context:
        return self._context

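One detail worth noting in the hunk above: the cipher string becomes a bytes literal because pyOpenSSL's `Context.set_cipher_list` expects `bytes` (it warns on `str`, and newer versions may reject it). The call in isolation, as a sketch:

```python
from OpenSSL import SSL

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
# bytes, not str -- matching the change above
ctx.set_cipher_list(
    b"ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
)
```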
@@ -98,7 +102,7 @@ class FederationPolicyForHTTPS:
    constructs an SSLClientConnectionCreator factory accordingly.
    """

    def __init__(self, config):
    def __init__(self, config: HomeServerConfig):
        self._config = config

        # Check if we're using a custom list of a CA certificates
@@ -131,7 +135,7 @@ class FederationPolicyForHTTPS:
            self._config.tls.federation_certificate_verification_whitelist
        )

    def get_options(self, host: bytes):
    def get_options(self, host: bytes) -> IOpenSSLClientConnectionCreator:
        # IPolicyForHTTPS.get_options takes bytes, but we want to compare
        # against the str whitelist. The hostnames in the whitelist are already
        # IDNA-encoded like the hosts will be here.
@@ -153,7 +157,9 @@ class FederationPolicyForHTTPS:

        return SSLClientConnectionCreator(host, ssl_context, should_verify)

    def creatorForNetloc(self, hostname, port):
    def creatorForNetloc(
        self, hostname: bytes, port: int
    ) -> IOpenSSLClientConnectionCreator:
        """Implements the IPolicyForHTTPS interface so that this can be passed
        directly to agents.
        """
@@ -169,16 +175,18 @@ class RegularPolicyForHTTPS:
    trust root.
    """

    def __init__(self):
    def __init__(self) -> None:
        trust_root = platformTrust()
        self._ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
        self._ssl_context.set_info_callback(_context_info_cb)

    def creatorForNetloc(self, hostname, port):
    def creatorForNetloc(
        self, hostname: bytes, port: int
    ) -> IOpenSSLClientConnectionCreator:
        return SSLClientConnectionCreator(hostname, self._ssl_context, True)


def _context_info_cb(ssl_connection, where, ret):
def _context_info_cb(ssl_connection: SSL.Connection, where: int, ret: int) -> None:
    """The 'information callback' for our openssl context objects.

    Note: Once this is set as the info callback on a Context object, the Context should
@@ -204,11 +212,13 @@ class SSLClientConnectionCreator:
    Replaces twisted.internet.ssl.ClientTLSOptions
    """

    def __init__(self, hostname: bytes, ctx, verify_certs: bool):
    def __init__(self, hostname: bytes, ctx: SSL.Context, verify_certs: bool):
        self._ctx = ctx
        self._verifier = ConnectionVerifier(hostname, verify_certs)

    def clientConnectionForTLS(self, tls_protocol):
    def clientConnectionForTLS(
        self, tls_protocol: TLSMemoryBIOProtocol
    ) -> SSL.Connection:
        context = self._ctx
        connection = SSL.Connection(context, None)

@@ -219,7 +229,7 @@ class SSLClientConnectionCreator:
        # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
        # tls_protocol so that the SSL context's info callback has something to
        # call to do the cert verification.
        tls_protocol._synapse_tls_verifier = self._verifier
        tls_protocol._synapse_tls_verifier = self._verifier  # type: ignore[attr-defined]
        return connection


@@ -244,7 +254,9 @@ class ConnectionVerifier:
        self._hostnameBytes = hostname
        self._hostnameASCII = self._hostnameBytes.decode("ascii")

    def verify_context_info_cb(self, ssl_connection, where):
    def verify_context_info_cb(
        self, ssl_connection: SSL.Connection, where: int
    ) -> None:
        if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
            ssl_connection.set_tlsext_host_name(self._hostnameBytes)

@@ -100,7 +100,7 @@ def compute_content_hash(


def compute_event_reference_hash(
    event, hash_algorithm: Hasher = hashlib.sha256
    event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> Tuple[str, bytes]:
    """Computes the event reference hash. This is the hash of the redacted
    event.

@@ -87,7 +87,7 @@ class VerifyJsonRequest:
        server_name: str,
        json_object: JsonDict,
        minimum_valid_until_ms: int,
    ):
    ) -> "VerifyJsonRequest":
        """Create a VerifyJsonRequest to verify all signatures on a signed JSON
        object for the given server.
        """
@@ -104,7 +104,7 @@ class VerifyJsonRequest:
        server_name: str,
        event: EventBase,
        minimum_valid_until_ms: int,
    ):
    ) -> "VerifyJsonRequest":
        """Create a VerifyJsonRequest to verify all signatures on an event
        object for the given server.
        """
@@ -449,7 +449,9 @@ class StoreKeyFetcher(KeyFetcher):

        self.store = hs.get_datastore()

    async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]):
    async def _fetch_keys(
        self, keys_to_fetch: List[_FetchKeyRequest]
    ) -> Dict[str, Dict[str, FetchKeyResult]]:
        key_ids_to_fetch = (
            (queue_value.server_name, key_id)
            for queue_value in keys_to_fetch

+15 -18
@@ -14,7 +14,7 @@
 # limitations under the License.

 import logging
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union

 from canonicaljson import encode_canonical_json
 from signedjson.key import decode_verify_key_bytes
@@ -113,7 +113,7 @@ def validate_event_for_room_version(


 def check_auth_rules_for_event(
-    room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase]
+    room_version_obj: RoomVersion, event: EventBase, auth_events: Iterable[EventBase]
 ) -> None:
     """Check that an event complies with the auth rules

@@ -137,8 +137,6 @@ def check_auth_rules_for_event(
     Raises:
         AuthError if the checks fail
     """
-    assert isinstance(auth_events, dict)
-
     # We need to ensure that the auth events are actually for the same room, to
     # stop people from using powers they've been granted in other rooms for
     # example.
@@ -147,7 +145,7 @@ def check_auth_rules_for_event(
     # the state res algorithm isn't silly enough to give us events from different rooms.
     # Still, it's easier to do it anyway.
     room_id = event.room_id
-    for auth_event in auth_events.values():
+    for auth_event in auth_events:
         if auth_event.room_id != room_id:
             raise AuthError(
                 403,
@@ -186,8 +184,10 @@ def check_auth_rules_for_event(
         logger.debug("Allowing! %s", event)
         return

+    auth_dict = {(e.type, e.state_key): e for e in auth_events}
+
     # 3. If event does not have a m.room.create in its auth_events, reject.
-    creation_event = auth_events.get((EventTypes.Create, ""), None)
+    creation_event = auth_dict.get((EventTypes.Create, ""), None)
     if not creation_event:
         raise AuthError(403, "No create event in auth events")

@@ -195,7 +195,7 @@ def check_auth_rules_for_event(
     creating_domain = get_domain_from_id(event.room_id)
     originating_domain = get_domain_from_id(event.sender)
     if creating_domain != originating_domain:
-        if not _can_federate(event, auth_events):
+        if not _can_federate(event, auth_dict):
             raise AuthError(403, "This room has been marked as unfederatable.")

     # 4. If type is m.room.aliases
@@ -217,23 +217,20 @@ def check_auth_rules_for_event(
         logger.debug("Allowing! %s", event)
         return

-    if logger.isEnabledFor(logging.DEBUG):
-        logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
-
     # 5. If type is m.room.membership
     if event.type == EventTypes.Member:
-        _is_membership_change_allowed(room_version_obj, event, auth_events)
+        _is_membership_change_allowed(room_version_obj, event, auth_dict)
         logger.debug("Allowing! %s", event)
         return

-    _check_event_sender_in_room(event, auth_events)
+    _check_event_sender_in_room(event, auth_dict)

     # Special case to allow m.room.third_party_invite events wherever
     # a user is allowed to issue invites. Fixes
     # https://github.com/vector-im/vector-web/issues/1208 hopefully
     if event.type == EventTypes.ThirdPartyInvite:
-        user_level = get_user_power_level(event.user_id, auth_events)
-        invite_level = get_named_level(auth_events, "invite", 0)
+        user_level = get_user_power_level(event.user_id, auth_dict)
+        invite_level = get_named_level(auth_dict, "invite", 0)

         if user_level < invite_level:
             raise AuthError(403, "You don't have permission to invite users")
@@ -241,20 +238,20 @@ def check_auth_rules_for_event(
         logger.debug("Allowing! %s", event)
         return

-    _can_send_event(event, auth_events)
+    _can_send_event(event, auth_dict)

     if event.type == EventTypes.PowerLevels:
-        _check_power_levels(room_version_obj, event, auth_events)
+        _check_power_levels(room_version_obj, event, auth_dict)

     if event.type == EventTypes.Redaction:
-        check_redaction(room_version_obj, event, auth_events)
+        check_redaction(room_version_obj, event, auth_dict)

     if (
         event.type == EventTypes.MSC2716_INSERTION
         or event.type == EventTypes.MSC2716_BATCH
         or event.type == EventTypes.MSC2716_MARKER
     ):
-        check_historical(room_version_obj, event, auth_events)
+        check_historical(room_version_obj, event, auth_dict)

     logger.debug("Allowing! %s", event)

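The change above swaps the `StateMap[EventBase]` parameter for a plain `Iterable[EventBase]` and only builds the `(type, state_key)` lookup once the cheap early-return checks have passed. A minimal sketch of that dict-building step, under the assumption that each event exposes `type` and `state_key` attributes (`FakeEvent` is illustrative, not Synapse's `EventBase`):

import typing


class FakeEvent:
    # Illustrative stand-in for Synapse's EventBase.
    def __init__(self, type: str, state_key: str) -> None:
        self.type = type
        self.state_key = state_key


def build_auth_dict(
    auth_events: typing.Iterable[FakeEvent],
) -> typing.Dict[typing.Tuple[str, str], FakeEvent]:
    # Accepting any iterable frees callers from building a StateMap up front;
    # the (type, state_key) index is only materialised when it is needed.
    return {(e.type, e.state_key): e for e in auth_events}


auth_dict = build_auth_dict([FakeEvent("m.room.create", "")])
assert ("m.room.create", "") in auth_dict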
@@ -348,12 +348,16 @@ class EventBase(metaclass=abc.ABCMeta):
         return self.__repr__()

     def __repr__(self):
-        return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
-            self.__class__.__name__,
-            self.event_id,
-            self.get("type", None),
-            self.get("state_key", None),
-            self.internal_metadata.is_outlier(),
+        rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
+
+        return (
+            f"<{self.__class__.__name__} "
+            f"{rejection}"
+            f"event_id={self.event_id}, "
+            f"type={self.get('type')}, "
+            f"state_key={self.get('state_key')}, "
+            f"outlier={self.internal_metadata.is_outlier()}"
+            ">"
         )

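The new `__repr__` prepares the optional `REJECTED=...` segment up front so the f-string itself stays flat. A toy class (not Synapse's `EventBase`) showing the same pattern:

class Thing:
    def __init__(self, event_id: str, rejected_reason: str = "") -> None:
        self.event_id = event_id
        self.rejected_reason = rejected_reason

    def __repr__(self) -> str:
        # Build the optional segment first; it is empty when not rejected.
        rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
        return f"<{self.__class__.__name__} {rejection}event_id={self.event_id}>"


print(repr(Thing("$abc")))          # <Thing event_id=$abc>
print(repr(Thing("$abc", "spam")))  # <Thing REJECTED=spam, event_id=$abc>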
@@ -90,13 +90,13 @@ class EventBuilder:
     )

     @property
-    def state_key(self):
+    def state_key(self) -> str:
         if self._state_key is not None:
             return self._state_key

         raise AttributeError("state_key")

-    def is_state(self):
+    def is_state(self) -> bool:
         return self._state_key is not None

     async def build(

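A side note on the `state_key` property above: raising `AttributeError` from a property makes `hasattr()` and `getattr()` treat it like a genuinely missing attribute. A minimal sketch, assuming that is the intended behaviour:

from typing import Optional


class Builder:
    def __init__(self, state_key: Optional[str] = None) -> None:
        self._state_key = state_key

    @property
    def state_key(self) -> str:
        if self._state_key is not None:
            return self._state_key
        # AttributeError from a property is indistinguishable, to hasattr(),
        # from an ordinary missing attribute.
        raise AttributeError("state_key")


print(hasattr(Builder("x"), "state_key"))  # True
print(hasattr(Builder(), "state_key"))     # False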
@@ -14,6 +14,7 @@
 import logging
 from typing import (
     TYPE_CHECKING,
     Any,
+    Awaitable,
     Callable,
     Dict,
@@ -33,14 +34,13 @@ if TYPE_CHECKING:
 GET_USERS_FOR_STATES_CALLBACK = Callable[
     [Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
 ]
-GET_INTERESTED_USERS_CALLBACK = Callable[
-    [str], Awaitable[Union[Set[str], "PresenceRouter.ALL_USERS"]]
-]
+# This must either return a set of strings or the constant PresenceRouter.ALL_USERS.
+GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]

 logger = logging.getLogger(__name__)


-def load_legacy_presence_router(hs: "HomeServer"):
+def load_legacy_presence_router(hs: "HomeServer") -> None:
     """Wrapper that loads a presence router module configured using the old
     configuration, and registers the hooks they implement.
     """
@@ -69,9 +69,10 @@ def load_legacy_presence_router(hs: "HomeServer"):
     if f is None:
         return None

-    def run(*args, **kwargs):
-        # mypy doesn't do well across function boundaries so we need to tell it
-        # f is definitely not None.
+    def run(*args: Any, **kwargs: Any) -> Awaitable:
+        # Assertion required because mypy can't prove we won't change `f`
+        # back to `None`. See
+        # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
         assert f is not None

         return maybe_awaitable(f(*args, **kwargs))
@@ -104,7 +105,7 @@ class PresenceRouter:
         self,
         get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
         get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
-    ):
+    ) -> None:
         # PresenceRouter modules are required to implement both of these methods
         # or neither of them as they are assumed to act in a complementary manner
         paired_methods = [get_users_for_states, get_interested_users]
@@ -142,7 +143,7 @@ class PresenceRouter:
             # Don't include any extra destinations for presence updates
             return {}

-        users_for_states = {}
+        users_for_states: Dict[str, Set[UserPresenceState]] = {}
         # run all the callbacks for get_users_for_states and combine the results
         for callback in self._get_users_for_states_callbacks:
             try:
@@ -171,7 +172,7 @@ class PresenceRouter:

         return users_for_states

-    async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]:
+    async def get_interested_users(self, user_id: str) -> Union[Set[str], str]:
         """
         Retrieve a list of users that `user_id` is interested in receiving the
         presence of. This will be in addition to those they share a room with.

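The rewritten comments point at a real mypy limitation: an `Optional` checked against `None` is not narrowed inside a nested function, because mypy cannot prove the variable is never rebound before the inner function runs. A self-contained sketch of the assert-based workaround (plain asyncio here, not Synapse's `maybe_awaitable`):

import asyncio
from typing import Any, Awaitable, Callable, Optional


def wrap(
    f: Optional[Callable[..., Awaitable[int]]]
) -> Optional[Callable[..., Awaitable[int]]]:
    if f is None:
        return None

    async def run(*args: Any, **kwargs: Any) -> int:
        # Assertion required: mypy can't carry the `f is not None` check
        # across the function boundary, since `f` could be rebound later.
        assert f is not None
        return await f(*args, **kwargs)

    return run


async def double(x: int) -> int:
    return 2 * x


wrapped = wrap(double)
assert wrapped is not None
print(asyncio.run(wrapped(21)))  # 42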
+60 -50
@@ -11,17 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union

 import attr
 from frozendict import frozendict

+from twisted.internet.defer import Deferred
+
 from synapse.appservice import ApplicationService
 from synapse.events import EventBase
 from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.types import StateMap
+from synapse.types import JsonDict, StateMap

 if TYPE_CHECKING:
+    from synapse.storage import Storage
     from synapse.storage.databases.main import DataStore

@@ -112,13 +115,13 @@ class EventContext:

     @staticmethod
     def with_state(
-        state_group,
-        state_group_before_event,
-        current_state_ids,
-        prev_state_ids,
-        prev_group=None,
-        delta_ids=None,
-    ):
+        state_group: Optional[int],
+        state_group_before_event: Optional[int],
+        current_state_ids: Optional[StateMap[str]],
+        prev_state_ids: Optional[StateMap[str]],
+        prev_group: Optional[int] = None,
+        delta_ids: Optional[StateMap[str]] = None,
+    ) -> "EventContext":
         return EventContext(
             current_state_ids=current_state_ids,
             prev_state_ids=prev_state_ids,
@@ -129,22 +132,22 @@ class EventContext:
         )

     @staticmethod
-    def for_outlier():
+    def for_outlier() -> "EventContext":
         """Return an EventContext instance suitable for persisting an outlier event"""
         return EventContext(
             current_state_ids={},
             prev_state_ids={},
         )

-    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
+    async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
         """Converts self to a type that can be serialized as JSON, and then
         deserialized by `deserialize`

         Args:
-            event (FrozenEvent): The event that this context relates to
+            event: The event that this context relates to

         Returns:
-            dict
+            The serialized event.
         """

         # We don't serialize the full state dicts, instead they get pulled out
@@ -170,17 +173,16 @@ class EventContext:
         }

     @staticmethod
-    def deserialize(storage, input):
+    def deserialize(storage: "Storage", input: JsonDict) -> "EventContext":
         """Converts a dict that was produced by `serialize` back into a
         EventContext.

         Args:
-            storage (Storage): Used to convert AS ID to AS object and fetch
-                state.
-            input (dict): A dict produced by `serialize`
+            storage: Used to convert AS ID to AS object and fetch state.
+            input: A dict produced by `serialize`

         Returns:
-            EventContext
+            The event context.
         """
         context = _AsyncEventContextImpl(
             # We use the state_group and prev_state_id stuff to pull the
@@ -241,22 +243,25 @@ class EventContext:
         await self._ensure_fetched()
         return self._current_state_ids

-    async def get_prev_state_ids(self):
+    async def get_prev_state_ids(self) -> StateMap[str]:
         """
         Gets the room state map, excluding this event.

         For a non-state event, this will be the same as get_current_state_ids().

         Returns:
-            dict[(str, str), str]|None: Returns None if state_group
-                is None, which happens when the associated event is an outlier.
-                Maps a (type, state_key) to the event ID of the state event matching
-                this tuple.
+            Returns {} if state_group is None, which happens when the associated
+            event is an outlier.
+
+            Maps a (type, state_key) to the event ID of the state event matching
+            this tuple.
         """
         await self._ensure_fetched()
+        # There *should* be previous state IDs now.
+        assert self._prev_state_ids is not None
         return self._prev_state_ids

-    def get_cached_current_state_ids(self):
+    def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
         """Gets the current state IDs if we have them already cached.

         It is an error to access this for a rejected event, since rejected state should
@@ -264,16 +269,17 @@ class EventContext:
         ``rejected`` is set.

         Returns:
-            dict[(str, str), str]|None: Returns None if we haven't cached the
-                state or if state_group is None, which happens when the associated
-                event is an outlier.
+            Returns None if we haven't cached the state or if state_group is None
+            (which happens when the associated event is an outlier).
+
+            Otherwise, returns the current state IDs.
         """
         if self.rejected:
             raise RuntimeError("Attempt to access state_ids of rejected event")

         return self._current_state_ids

-    async def _ensure_fetched(self):
+    async def _ensure_fetched(self) -> None:
         return None

@@ -285,46 +291,46 @@ class _AsyncEventContextImpl(EventContext):

     Attributes:

-        _storage (Storage)
+        _storage

-        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
-            been calculated. None if we haven't started calculating yet
+        _fetching_state_deferred: Resolves when *_state_ids have been calculated.
+            None if we haven't started calculating yet

-        _event_type (str): The type of the event the context is associated with.
+        _event_type: The type of the event the context is associated with.

-        _event_state_key (str): The state_key of the event the context is
-            associated with.
+        _event_state_key: The state_key of the event the context is associated with.

-        _prev_state_id (str|None): If the event associated with the context is
-            a state event, then `_prev_state_id` is the event_id of the state
-            that was replaced.
+        _prev_state_id: If the event associated with the context is a state event,
+            then `_prev_state_id` is the event_id of the state that was replaced.
     """

     # This needs to have a default as we're inheriting
-    _storage = attr.ib(default=None)
-    _prev_state_id = attr.ib(default=None)
-    _event_type = attr.ib(default=None)
-    _event_state_key = attr.ib(default=None)
-    _fetching_state_deferred = attr.ib(default=None)
+    _storage: "Storage" = attr.ib(default=None)
+    _prev_state_id: Optional[str] = attr.ib(default=None)
+    _event_type: str = attr.ib(default=None)
+    _event_state_key: Optional[str] = attr.ib(default=None)
+    _fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)

-    async def _ensure_fetched(self):
+    async def _ensure_fetched(self) -> None:
         if not self._fetching_state_deferred:
             self._fetching_state_deferred = run_in_background(self._fill_out_state)

-        return await make_deferred_yieldable(self._fetching_state_deferred)
+        await make_deferred_yieldable(self._fetching_state_deferred)

-    async def _fill_out_state(self):
+    async def _fill_out_state(self) -> None:
         """Called to populate the _current_state_ids and _prev_state_ids
         attributes by loading from the database.
         """
         if self.state_group is None:
             return

-        self._current_state_ids = await self._storage.state.get_state_ids_for_group(
+        current_state_ids = await self._storage.state.get_state_ids_for_group(
             self.state_group
         )
+        # Set this separately so mypy knows current_state_ids is not None.
+        self._current_state_ids = current_state_ids
         if self._event_state_key is not None:
-            self._prev_state_ids = dict(self._current_state_ids)
+            self._prev_state_ids = dict(current_state_ids)

             key = (self._event_type, self._event_state_key)
             if self._prev_state_id:
@@ -332,10 +338,12 @@ class _AsyncEventContextImpl(EventContext):
             else:
                 self._prev_state_ids.pop(key, None)
         else:
-            self._prev_state_ids = self._current_state_ids
+            self._prev_state_ids = current_state_ids


-def _encode_state_dict(state_dict):
+def _encode_state_dict(
+    state_dict: Optional[StateMap[str]],
+) -> Optional[List[Tuple[str, str, str]]]:
     """Since dicts of (type, state_key) -> event_id cannot be serialized in
     JSON we need to convert them to a form that can.
     """
@@ -345,7 +353,9 @@ def _encode_state_dict(state_dict):
     return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]


-def _decode_state_dict(input):
+def _decode_state_dict(
+    input: Optional[List[Tuple[str, str, str]]]
+) -> Optional[StateMap[str]]:
     """Decodes a state dict encoded using `_encode_state_dict` above"""
     if input is None:
         return None

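`_encode_state_dict` / `_decode_state_dict` exist because JSON objects can only have string keys, so the `(type, state_key)` tuple keys are flattened into triples. A simplified round-trip sketch (`StateMap` here is a plain alias, not `synapse.types.StateMap`, and the real decoder may additionally freeze the result):

from typing import Dict, List, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # simplified stand-in


def encode_state_dict(
    state_dict: Optional[StateMap],
) -> Optional[List[Tuple[str, str, str]]]:
    if state_dict is None:
        return None
    # Flatten tuple keys into (type, state_key, event_id) triples.
    return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]


def decode_state_dict(
    encoded: Optional[List[Tuple[str, str, str]]],
) -> Optional[StateMap]:
    if encoded is None:
        return None
    return {(etype, state_key): v for etype, state_key, v in encoded}


state: StateMap = {("m.room.member", "@alice:example.org"): "$event1"}
assert decode_state_dict(encode_state_dict(state)) == state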
+14 -11
@@ -77,7 +77,7 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
 ]


-def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
+def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
     """Wrapper that loads spam checkers configured using the old configuration, and
     registers the spam checker hooks they implement.
     """
@@ -129,9 +129,9 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
             request_info: Collection[Tuple[str, str]],
             auth_provider_id: Optional[str],
         ) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
-            # We've already made sure f is not None above, but mypy doesn't
-            # do well across function boundaries so we need to tell it f is
-            # definitely not None.
+            # Assertion required because mypy can't prove we won't
+            # change `f` back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             return f(
@@ -146,9 +146,10 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
                 "Bad signature for callback check_registration_for_spam",
             )

-        def run(*args, **kwargs):
-            # mypy doesn't do well across function boundaries so we need to tell it
-            # wrapped_func is definitely not None.
+        def run(*args: Any, **kwargs: Any) -> Awaitable:
+            # Assertion required because mypy can't prove we won't change `f`
+            # back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert wrapped_func is not None

             return maybe_awaitable(wrapped_func(*args, **kwargs))
@@ -165,7 +166,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):


 class SpamChecker:
-    def __init__(self):
+    def __init__(self) -> None:
         self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
         self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
         self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
@@ -209,7 +210,7 @@ class SpamChecker:
             CHECK_REGISTRATION_FOR_SPAM_CALLBACK
         ] = None,
         check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
-    ):
+    ) -> None:
         """Register callbacks from module for each hook."""
         if check_event_for_spam is not None:
             self._check_event_for_spam_callbacks.append(check_event_for_spam)
@@ -275,7 +276,9 @@ class SpamChecker:

         return False

-    async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool):
+    async def user_may_join_room(
+        self, user_id: str, room_id: str, is_invited: bool
+    ) -> bool:
         """Checks if a given user is allowed to join a room.
         Not called when a user creates a room.

@@ -285,7 +288,7 @@ class SpamChecker:
             is_invited: Whether the user is invited into the room

         Returns:
-            bool: Whether the user may join the room
+            Whether the user may join the room
         """
         for callback in self._user_may_join_room_callbacks:
             if await callback(user_id, room_id, is_invited) is False:

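`SpamChecker` keeps one list of callbacks per hook, and `user_may_join_room` gives every registered module a veto, stopping at the first `False`. A toy version of the pattern (class and callback names here are illustrative, not Synapse's):

import asyncio
from typing import Awaitable, Callable, List

UserMayJoinRoomCallback = Callable[[str, str, bool], Awaitable[bool]]


class TinySpamChecker:
    # Toy version of the callback-registry pattern, not Synapse's SpamChecker.
    def __init__(self) -> None:
        self._user_may_join_room_callbacks: List[UserMayJoinRoomCallback] = []

    def register_callbacks(self, user_may_join_room: UserMayJoinRoomCallback) -> None:
        self._user_may_join_room_callbacks.append(user_may_join_room)

    async def user_may_join_room(
        self, user_id: str, room_id: str, is_invited: bool
    ) -> bool:
        # Every registered module gets a veto; the first False wins.
        for callback in self._user_may_join_room_callbacks:
            if await callback(user_id, room_id, is_invited) is False:
                return False
        return True


async def invite_only(user_id: str, room_id: str, is_invited: bool) -> bool:
    return is_invited


checker = TinySpamChecker()
checker.register_callbacks(invite_only)
print(asyncio.run(checker.user_may_join_room("@a:hs", "!r:hs", False)))  # False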
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

 from synapse.api.errors import SynapseError
 from synapse.events import EventBase
@@ -38,7 +38,7 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
 ]


-def load_legacy_third_party_event_rules(hs: "HomeServer"):
+def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
     """Wrapper that loads a third party event rules module configured using the old
     configuration, and registers the hooks they implement.
     """
@@ -77,9 +77,9 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
             event: EventBase,
             state_events: StateMap[EventBase],
         ) -> Tuple[bool, Optional[dict]]:
-            # We've already made sure f is not None above, but mypy doesn't do well
-            # across function boundaries so we need to tell it f is definitely not
-            # None.
+            # Assertion required because mypy can't prove we won't change
+            # `f` back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
             assert f is not None

             res = await f(event, state_events)
@@ -98,9 +98,9 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
         async def wrap_on_create_room(
             requester: Requester, config: dict, is_requester_admin: bool
         ) -> None:
-            # We've already made sure f is not None above, but mypy doesn't do well
-            # across function boundaries so we need to tell it f is definitely not
-            # None.
+            # Assertion required because mypy can't prove we won't change
+            # `f` back to `None`. See
+            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

             res = await f(requester, config, is_requester_admin)
@@ -112,9 +112,10 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):

         return wrap_on_create_room

-    def run(*args, **kwargs):
-        # mypy doesn't do well across function boundaries so we need to tell it
-        # f is definitely not None.
+    def run(*args: Any, **kwargs: Any) -> Awaitable:
+        # Assertion required because mypy can't prove we won't change `f`
+        # back to `None`. See
+        # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
         assert f is not None

         return maybe_awaitable(f(*args, **kwargs))
@@ -162,7 +163,7 @@ class ThirdPartyEventRules:
         check_visibility_can_be_modified: Optional[
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
-    ):
+    ) -> None:
         """Register callbacks from modules for each hook."""
         if check_event_allowed is not None:
             self._check_event_allowed_callbacks.append(check_event_allowed)

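All three legacy loaders funnel module methods through `maybe_awaitable`, so modules may implement hooks either as plain functions or as coroutines. A sketch of that contract using only the standard library (Synapse's own helper lives elsewhere and differs in detail):

import asyncio
import inspect
from typing import Any, Awaitable, Union


async def maybe_awaitable(value: Union[Awaitable[Any], Any]) -> Any:
    # Await awaitables; pass plain values straight through.
    if inspect.isawaitable(value):
        return await value
    return value


def sync_rule(event: str) -> bool:
    return event != "bad"


async def async_rule(event: str) -> bool:
    return len(event) < 100


async def main() -> None:
    for rule in (sync_rule, async_rule):
        print(await maybe_awaitable(rule("hello")))  # True, True


asyncio.run(main())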
+69 -50
@@ -13,18 +13,32 @@
 # limitations under the License.
 import collections.abc
 import re
-from typing import Any, Mapping, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Union,
+)

 from frozendict import frozendict

 from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersion
+from synapse.types import JsonDict
 from synapse.util.async_helpers import yieldable_gather_results
 from synapse.util.frozenutils import unfreeze

 from . import EventBase

+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
 # (?<!stuff) matches if the current position in the string is not preceded
 # by a match for 'stuff'.
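The lookbehind described in the comment above is easy to verify directly: the split on `.` is suppressed whenever the dot is escaped with a backslash. A quick demonstration:

import re

# (?<!\\) refuses to split when the "." is preceded by a backslash.
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")

print(SPLIT_FIELD_REGEX.split(r"content.body"))         # ['content', 'body']
print(SPLIT_FIELD_REGEX.split(r"content\.with\.dots"))  # ['content\\.with\\.dots']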
@@ -65,7 +79,7 @@ def prune_event(event: EventBase) -> EventBase:
     return pruned_event


-def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
+def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
     """Redacts the event_dict in the same way as `prune_event`, except it
     operates on dicts rather than event objects

@@ -97,7 +111,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:

     new_content = {}

-    def add_fields(*fields):
+    def add_fields(*fields: str) -> None:
         for field in fields:
             if field in event_dict["content"]:
                 new_content[field] = event_dict["content"][field]
@@ -151,7 +165,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:

     allowed_fields["content"] = new_content

-    unsigned = {}
+    unsigned: JsonDict = {}
     allowed_fields["unsigned"] = unsigned

     event_unsigned = event_dict.get("unsigned", {})
@@ -164,16 +178,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
     return allowed_fields


-def _copy_field(src, dst, field):
+def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None:
     """Copy the field in 'src' to 'dst'.

     For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
     then dst={"foo":{"bar":5}}.

     Args:
-        src(dict): The dict to read from.
-        dst(dict): The dict to modify.
-        field(list<str>): List of keys to drill down to in 'src'.
+        src: The dict to read from.
+        dst: The dict to modify.
+        field: List of keys to drill down to in 'src'.
     """
     if len(field) == 0:  # this should be impossible
         return
@@ -205,7 +219,7 @@ def _copy_field(src, dst, field):
             sub_out_dict[key_to_move] = sub_dict[key_to_move]


-def only_fields(dictionary, fields):
+def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
     """Return a new dict with only the fields in 'dictionary' which are present
     in 'fields'.

@@ -215,11 +229,11 @@ def only_fields(dictionary, fields):
     A literal '.' character in a field name may be escaped using a '\'.

     Args:
-        dictionary(dict): The dictionary to read from.
-        fields(list<str>): A list of fields to copy over. Only shallow refs are
+        dictionary: The dictionary to read from.
+        fields: A list of fields to copy over. Only shallow refs are
             taken.
     Returns:
-        dict: A new dictionary with only the given fields. If fields was empty,
+        A new dictionary with only the given fields. If fields was empty,
         the same dictionary is returned.
     """
     if len(fields) == 0:
@@ -235,17 +249,17 @@ def only_fields(dictionary, fields):
         [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
     ]

-    output = {}
+    output: JsonDict = {}
     for field_array in split_fields:
         _copy_field(dictionary, output, field_array)
     return output


-def format_event_raw(d):
+def format_event_raw(d: JsonDict) -> JsonDict:
     return d


-def format_event_for_client_v1(d):
+def format_event_for_client_v1(d: JsonDict) -> JsonDict:
     d = format_event_for_client_v2(d)

     sender = d.get("sender")
@@ -267,7 +281,7 @@ def format_event_for_client_v1(d):
     return d


-def format_event_for_client_v2(d):
+def format_event_for_client_v2(d: JsonDict) -> JsonDict:
     drop_keys = (
         "auth_events",
         "prev_events",
@@ -282,37 +296,37 @@ def format_event_for_client_v2(d):
     return d


-def format_event_for_client_v2_without_room_id(d):
+def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
     d = format_event_for_client_v2(d)
     d.pop("room_id", None)
     return d

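`_copy_field` drills down a key path in `src` and recreates just that nesting in `dst`, which is what lets `only_fields` filter nested event fields. A simplified sketch of the idea (the real function handles a few more edge cases):

from typing import Any, Dict, List


def copy_field(src: Dict[str, Any], dst: Dict[str, Any], field: List[str]) -> None:
    # Drill down to the parent of the target key in src; give up if any
    # intermediate level is missing or not a dict.
    for key in field[:-1]:
        sub = src.get(key)
        if not isinstance(sub, dict):
            return
        src = sub
    if field[-1] not in src:
        return
    # Only now recreate the matching nesting in dst and copy the value.
    for key in field[:-1]:
        dst = dst.setdefault(key, {})
    dst[field[-1]] = src[field[-1]]


out: Dict[str, Any] = {}
copy_field({"content": {"body": "hi", "url": "mxc://x"}}, out, ["content", "body"])
print(out)  # {'content': {'body': 'hi'}}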
 def serialize_event(
-    e,
-    time_now_ms,
-    as_client_event=True,
-    event_format=format_event_for_client_v1,
-    token_id=None,
-    only_event_fields=None,
-    include_stripped_room_state=False,
-):
+    e: Union[JsonDict, EventBase],
+    time_now_ms: int,
+    as_client_event: bool = True,
+    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
+    token_id: Optional[str] = None,
+    only_event_fields: Optional[List[str]] = None,
+    include_stripped_room_state: bool = False,
+) -> JsonDict:
     """Serialize event for clients

     Args:
-        e (EventBase)
-        time_now_ms (int)
-        as_client_event (bool)
+        e
+        time_now_ms
+        as_client_event
         event_format
         token_id
         only_event_fields
-        include_stripped_room_state (bool): Some events can have stripped room state
+        include_stripped_room_state: Some events can have stripped room state
             stored in the `unsigned` field. This is required for invite and knock
             functionality. If this option is False, that state will be removed from the
             event before it is returned. Otherwise, it will be kept.

     Returns:
-        dict
+        The serialized event dictionary.
     """

     # FIXME(erikj): To handle the case of presence events and the like
@@ -369,25 +383,27 @@ class EventClientSerializer:
     clients.
     """

-    def __init__(self, hs):
+    def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastore()
-        self.experimental_msc1849_support_enabled = (
-            hs.config.server.experimental_msc1849_support_enabled
-        )
+        self._msc1849_enabled = hs.config.experimental.msc1849_enabled

     async def serialize_event(
-        self, event, time_now, bundle_aggregations=True, **kwargs
-    ):
+        self,
+        event: Union[JsonDict, EventBase],
+        time_now: int,
+        bundle_aggregations: bool = True,
+        **kwargs: Any,
+    ) -> JsonDict:
         """Serializes a single event.

         Args:
-            event (EventBase)
-            time_now (int): The current time in milliseconds
-            bundle_aggregations (bool): Whether to bundle in related events
+            event
+            time_now: The current time in milliseconds
+            bundle_aggregations: Whether to bundle in related events
             **kwargs: Arguments to pass to `serialize_event`

         Returns:
-            dict: The serialized event
+            The serialized event
         """
         # To handle the case of presence events and the like
         if not isinstance(event, EventBase):
@@ -400,7 +416,7 @@ class EventClientSerializer:
         # we need to bundle in with the event.
         # Do not bundle relations if the event has been redacted
         if not event.internal_metadata.is_redacted() and (
-            self.experimental_msc1849_support_enabled and bundle_aggregations
+            self._msc1849_enabled and bundle_aggregations
         ):
             annotations = await self.store.get_aggregation_groups_for_event(event_id)
             references = await self.store.get_relations_for_event(
@@ -448,25 +464,27 @@ class EventClientSerializer:

         return serialized_event

-    def serialize_events(self, events, time_now, **kwargs):
+    async def serialize_events(
+        self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
+    ) -> List[JsonDict]:
         """Serializes multiple events.

         Args:
-            event (iter[EventBase])
-            time_now (int): The current time in milliseconds
+            event
+            time_now: The current time in milliseconds
             **kwargs: Arguments to pass to `serialize_event`

         Returns:
-            Deferred[list[dict]]: The list of serialized events
+            The list of serialized events
         """
-        return yieldable_gather_results(
+        return await yieldable_gather_results(
             self.serialize_event, events, time_now=time_now, **kwargs
         )

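Making `serialize_events` an `async def` that `await`s the gathered results means callers get a plain list back rather than a deferred-like object. The closest stdlib analogue of gathering the per-event coroutines (Synapse's `yieldable_gather_results` additionally manages log contexts, which this sketch omits):

import asyncio
from typing import Any, Dict, Iterable, List


async def serialize_one(event: Dict[str, Any], time_now: int) -> Dict[str, Any]:
    # Stand-in for the real per-event serializer.
    return {**event, "ts": time_now}


async def serialize_events(
    events: Iterable[Dict[str, Any]], time_now: int
) -> List[Dict[str, Any]]:
    # Run the per-event coroutines concurrently and await the whole batch.
    return await asyncio.gather(*(serialize_one(e, time_now) for e in events))


print(asyncio.run(serialize_events([{"type": "m.room.message"}], 1000)))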
 def copy_power_levels_contents(
     old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
-):
+) -> Dict[str, Union[int, Dict[str, int]]]:
     """Copy the content of a power_levels event, unfreezing frozendicts along the way

     Raises:
@@ -475,7 +493,7 @@ def copy_power_levels_contents(
     if not isinstance(old_power_levels, collections.abc.Mapping):
         raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

-    power_levels = {}
+    power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
     for k, v in old_power_levels.items():

         if isinstance(v, int):
@@ -483,7 +501,8 @@ def copy_power_levels_contents(
             continue

         if isinstance(v, collections.abc.Mapping):
-            power_levels[k] = h = {}
+            h: Dict[str, int] = {}
+            power_levels[k] = h
             for k1, v1 in v.items():
                 # we should only have one level of nesting
                 if not isinstance(v1, int):
@@ -498,7 +517,7 @@ def copy_power_levels_contents(
     return power_levels


-def validate_canonicaljson(value: Any):
+def validate_canonicaljson(value: Any) -> None:
     """
     Ensure that the JSON object is valid according to the rules of canonical JSON.

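`copy_power_levels_contents` relies on power-levels content being at most one level deep: values are either ints or str-to-int mappings, and any frozendicts are copied into mutable dicts. A runnable sketch mirroring the annotated version (falling back to a plain `dict` when `frozendict` isn't installed, so the example stays self-contained):

import collections.abc
from typing import Dict, Mapping, Union

try:
    from frozendict import frozendict
except ImportError:  # keep the sketch runnable without the dependency
    frozendict = dict  # type: ignore[assignment]


def copy_power_levels_contents(
    old: Mapping[str, Union[int, Mapping[str, int]]]
) -> Dict[str, Union[int, Dict[str, int]]]:
    # power_levels content is at most one level deep: ints, or str -> int maps.
    power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
    for k, v in old.items():
        if isinstance(v, int):
            power_levels[k] = v
        elif isinstance(v, collections.abc.Mapping):
            h: Dict[str, int] = {}
            power_levels[k] = h
            for k1, v1 in v.items():
                if not isinstance(v1, int):
                    raise TypeError("power_levels value is not an int: %r" % (v1,))
                h[k1] = v1
        else:
            raise TypeError("unexpected power_levels value: %r" % (v,))
    return power_levels


print(copy_power_levels_contents(frozendict({"ban": 50, "users": frozendict({"@a:hs": 100})})))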
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import collections.abc
-from typing import Union
+from typing import Iterable, Union

 import jsonschema

@@ -28,11 +28,11 @@ from synapse.events.utils import (
     validate_canonicaljson,
 )
 from synapse.federation.federation_server import server_matches_acl_event
-from synapse.types import EventID, RoomID, UserID
+from synapse.types import EventID, JsonDict, RoomID, UserID


 class EventValidator:
-    def validate_new(self, event: EventBase, config: HomeServerConfig):
+    def validate_new(self, event: EventBase, config: HomeServerConfig) -> None:
         """Validates the event has roughly the right format

         Args:
@@ -116,7 +116,7 @@ class EventValidator:
                 errcode=Codes.BAD_JSON,
             )

-    def _validate_retention(self, event: EventBase):
+    def _validate_retention(self, event: EventBase) -> None:
         """Checks that an event that defines the retention policy for a room respects the
         format enforced by the spec.

@@ -156,7 +156,7 @@ class EventValidator:
                 errcode=Codes.BAD_JSON,
             )

-    def validate_builder(self, event: Union[EventBase, EventBuilder]):
+    def validate_builder(self, event: Union[EventBase, EventBuilder]) -> None:
         """Validates that the builder/event has roughly the right format. Only
         checks values that we expect a proto event to have, rather than all the
         fields an event would have
@@ -204,14 +204,14 @@ class EventValidator:

         self._ensure_state_event(event)

-    def _ensure_strings(self, d, keys):
+    def _ensure_strings(self, d: JsonDict, keys: Iterable[str]) -> None:
         for s in keys:
             if s not in d:
                 raise SynapseError(400, "'%s' not in content" % (s,))
             if not isinstance(d[s], str):
                 raise SynapseError(400, "'%s' not a string type" % (s,))

-    def _ensure_state_event(self, event):
+    def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None:
         if not event.is_state():
             raise SynapseError(400, "'%s' must be state events" % (event.type,))

@@ -244,7 +244,9 @@ POWER_LEVELS_SCHEMA = {
 }


-def _create_power_level_validator():
+# This could return something newer than Draft 7, but that's the current "latest"
+# validator.
+def _create_power_level_validator() -> jsonschema.Draft7Validator:
     validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)

     # by default jsonschema does not consider a frozendict to be an object so

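For context on the annotation above: `jsonschema.validators.validator_for` selects a validator class from a schema's `$schema` field and falls back to the library's latest draft when none is given, so the return annotation pins the current fallback, Draft 7. A small usage sketch with a hypothetical schema:

import jsonschema

SCHEMA = {"type": "object", "properties": {"ban": {"type": "integer"}}}

validator_cls = jsonschema.validators.validator_for(SCHEMA)
validator_cls.check_schema(SCHEMA)  # raises if the schema itself is invalid
validator = validator_cls(SCHEMA)

print(validator.is_valid({"ban": 50}))    # True
print(validator.is_valid({"ban": "50"}))  # False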
@@ -182,22 +182,29 @@ class ApplicationServicesHandler:
     def notify_interested_services_ephemeral(
         self,
         stream_key: str,
-        new_token: Optional[int],
+        new_token: Union[int, RoomStreamToken],
         users: Optional[Collection[Union[str, UserID]]] = None,
     ) -> None:
-        """This is called by the notifier in the background
-        when a ephemeral event handled by the homeserver.
-
-        This will determine which appservices
-        are interested in the event, and submit them.
-
-        Events will only be pushed to appservices
-        that have opted into ephemeral events
+        """
+        This is called by the notifier in the background when an ephemeral event is handled
+        by the homeserver.
+
+        This will determine which appservices are interested in the event, and submit them.

         Args:
             stream_key: The stream the event came from.
-            new_token: The latest stream token
-            users: The user(s) involved with the event.
+
+            `stream_key` can be "typing_key", "receipt_key" or "presence_key". Any other
+            value for `stream_key` will cause this function to return early.
+
+            Ephemeral events will only be pushed to appservices that have opted into
+            them.
+
+            Appservices will only receive ephemeral events that fall within their
+            registered user and room namespaces.
+
+            new_token: The stream token of the event.
+            users: The users that should be informed of the new event, if any.
         """
         if not self.notify_appservices:
             return
@@ -205,6 +212,13 @@ class ApplicationServicesHandler:
         if stream_key not in ("typing_key", "receipt_key", "presence_key"):
            return

+        # Convert new_token from a RoomStreamToken to an int if necessary.
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+        if isinstance(new_token, RoomStreamToken):
+            new_token = new_token.stream
+
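The conversion block keeps only the integer minimum stream ordering of a `RoomStreamToken`, discarding its vector-clock component. A simplified model of that normalisation (this `RoomStreamToken` is a stand-in, not Synapse's):

from typing import Dict, Union

import attr


@attr.s(frozen=True, slots=True)
class RoomStreamToken:
    # Stand-in: a minimum stream ordering plus a per-writer vector clock.
    stream = attr.ib(type=int)
    instance_map = attr.ib(type=Dict[str, int], factory=dict)


def to_stream_int(token: Union[int, RoomStreamToken]) -> int:
    # Safe only because every caller ignores the vector-clock component too.
    if isinstance(token, RoomStreamToken):
        return token.stream
    return token


assert to_stream_int(7) == 7
assert to_stream_int(RoomStreamToken(stream=7)) == 7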
         services = [
             service
             for service in self.store.get_app_services()
@@ -224,29 +238,39 @@ class ApplicationServicesHandler:
         self,
         services: List[ApplicationService],
         stream_key: str,
-        new_token: Optional[int],
+        new_token: int,
         users: Collection[Union[str, UserID]],
     ) -> None:
         logger.debug("Checking interested services for %s" % (stream_key))
         with Measure(self.clock, "notify_interested_services_ephemeral"):
             for service in services:
-                # Only handle typing if we have the latest token
-                if stream_key == "typing_key" and new_token is not None:
+                if stream_key == "typing_key":
+                    # Note that we don't persist the token (via set_type_stream_id_for_appservice)
+                    # for typing_key due to performance reasons and due to their highly
+                    # ephemeral nature.
+                    #
+                    # Instead we simply grab the latest typing updates in _handle_typing
+                    # and, if they apply to this application service, send it off.
                     events = await self._handle_typing(service, new_token)
                     if events:
                         self.scheduler.submit_ephemeral_events_for_as(service, events)
-                    # We don't persist the token for typing_key for performance reasons

                 elif stream_key == "receipt_key":
                     events = await self._handle_receipts(service)
                     if events:
                         self.scheduler.submit_ephemeral_events_for_as(service, events)

+                    # Persist the latest handled stream token for this appservice
                     await self.store.set_type_stream_id_for_appservice(
                         service, "read_receipt", new_token
                     )

                 elif stream_key == "presence_key":
                     events = await self._handle_presence(service, users)
                     if events:
                         self.scheduler.submit_ephemeral_events_for_as(service, events)

+                    # Persist the latest handled stream token for this appservice
                     await self.store.set_type_stream_id_for_appservice(
                         service, "presence", new_token
                     )
@@ -254,18 +278,54 @@ class ApplicationServicesHandler:
     async def _handle_typing(
         self, service: ApplicationService, new_token: int
     ) -> List[JsonDict]:
+        """
+        Return the typing events since the given stream token that the given application
+        service should receive.
+
+        First fetch all typing events between the given typing stream token (non-inclusive)
+        and the latest typing event stream token (inclusive). Then return only those typing
+        events that the given application service may be interested in.
+
+        Args:
+            service: The application service to check for which events it should receive.
+            new_token: A typing event stream token.
+
+        Returns:
+            A list of JSON dictionaries containing data derived from the typing events that
+            should be sent to the given application service.
+        """
         typing_source = self.event_sources.sources.typing
         # Get the typing events from just before current
         typing, _ = await typing_source.get_new_events_as(
             service=service,
             # For performance reasons, we don't persist the previous
-            # token in the DB and instead fetch the latest typing information
+            # token in the DB and instead fetch the latest typing event
             # for appservices.
+            # TODO: It'd likely be more efficient to simply fetch the
+            #   typing event with the given 'new_token' stream token and
+            #   check if the given service was interested, rather than
+            #   iterating over all typing events and only grabbing the
+            #   latest few.
             from_key=new_token - 1,
         )
         return typing

     async def _handle_receipts(self, service: ApplicationService) -> List[JsonDict]:
+        """
+        Return the latest read receipts that the given application service should receive.
+
+        First fetch all read receipts between the last receipt stream token that this
+        application service should have previously received (non-inclusive) and the
+        latest read receipt stream token (inclusive). Then from that set, return only
+        those read receipts that the given application service may be interested in.
+
+        Args:
+            service: The application service to check for which events it should receive.
+
+        Returns:
+            A list of JSON dictionaries containing data derived from the read receipts that
+            should be sent to the given application service.
+        """
         from_key = await self.store.get_type_stream_id_for_appservice(
             service, "read_receipt"
         )
@@ -278,6 +338,22 @@ class ApplicationServicesHandler:
     async def _handle_presence(
         self, service: ApplicationService, users: Collection[Union[str, UserID]]
     ) -> List[JsonDict]:
+        """
+        Return the latest presence updates that the given application service should receive.
+
+        First, filter the given users list to those that the application service is
+        interested in. Then retrieve the latest presence updates since the last-known
+        previously received presence stream token for the given application service.
+        Return those presence updates.
+
+        Args:
+            service: The application service that ephemeral events are being sent to.
+            users: The users that should receive the presence update.
+
+        Returns:
+            A list of json dictionaries containing data derived from the presence events
+            that should be sent to the given application service.
+        """
         events: List[JsonDict] = []
         presence_source = self.event_sources.sources.presence
         from_key = await self.store.get_type_stream_id_for_appservice(
@@ -290,9 +366,9 @@ class ApplicationServicesHandler:
             interested = await service.is_interested_in_presence(user, self.store)
             if not interested:
                 continue

             presence_events, _ = await presence_source.get_new_events(
                 user=user,
                 service=service,
                 from_key=from_key,
             )
             time_now = self.clock.time_msec()
Some files were not shown because too many files have changed in this diff.