Compare commits
176 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2d44ee6868 | ||
|
|
df84ad602b | ||
|
|
576921c66a | ||
|
|
b3e843be88 | ||
|
|
e0ef8fe58d | ||
|
|
b615fc35d6 | ||
|
|
f3a4be8700 | ||
|
|
72626b78ef | ||
|
|
2dbef6c10a | ||
|
|
60ad9460c4 | ||
|
|
400f391f71 | ||
|
|
34b0222c2b | ||
|
|
cc75a6b1b2 | ||
|
|
7004f43da1 | ||
|
|
d52c58dfa3 | ||
|
|
8c8e36af0d | ||
|
|
63cbdd8af0 | ||
|
|
c1510c97b5 | ||
|
|
4387b791e0 | ||
|
|
da957a60e8 | ||
|
|
85a09f8b8b | ||
|
|
2b82ec425f | ||
|
|
b9ce53e878 | ||
|
|
b0f03aeb6a | ||
|
|
ba00e20234 | ||
|
|
2d91b6256e | ||
|
|
6408372234 | ||
|
|
0f9adc99ad | ||
|
|
09eff1b3db | ||
|
|
ef7fe09778 | ||
|
|
57501d9194 | ||
|
|
62db603fa0 | ||
|
|
0930e9ae12 | ||
|
|
2c61a318cc | ||
|
|
ee2cee5f52 | ||
|
|
106d99b8cd | ||
|
|
78d5896d19 | ||
|
|
9b016a0fb4 | ||
|
|
522489fbcd | ||
|
|
df95d3aec2 | ||
|
|
0dd0c40329 | ||
|
|
5e0e683541 | ||
|
|
a6c318735d | ||
|
|
95813ff43c | ||
|
|
a21f8c4b41 | ||
|
|
8b1185347a | ||
|
|
191396f4ba | ||
|
|
f3efa0036b | ||
|
|
0170774b19 | ||
|
|
d85bc9a4a7 | ||
|
|
3ab55d43bd | ||
|
|
cc33d9eee2 | ||
|
|
a5d2ea3d08 | ||
|
|
73743b8ad1 | ||
|
|
e8f24b6c35 | ||
|
|
7d70582eb0 | ||
|
|
37b845dabc | ||
|
|
e09be0c87a | ||
|
|
5573133348 | ||
|
|
6a67f3786a | ||
|
|
013e0f9cae | ||
|
|
daf498e099 | ||
|
|
efd0074ab7 | ||
|
|
e2f0b49b3f | ||
|
|
1609ccf8fe | ||
|
|
50d8601581 | ||
|
|
b3698f945c | ||
|
|
b1c1a34f46 | ||
|
|
4d761d24ba | ||
|
|
87c3a6dcc0 | ||
|
|
99a4e5222d | ||
|
|
35d6b914eb | ||
|
|
404444260a | ||
|
|
317e9e415c | ||
|
|
b59f3281d5 | ||
|
|
b3e9b00fb2 | ||
|
|
1f9d0b8a7a | ||
|
|
cdd308845b | ||
|
|
732bbf6737 | ||
|
|
b83e822556 | ||
|
|
2a2b189130 | ||
|
|
8711e15734 | ||
|
|
988de0afb0 | ||
|
|
5dcacdf6d1 | ||
|
|
9abc5f2a05 | ||
|
|
84f5d83257 | ||
|
|
8eaffe013c | ||
|
|
1db9282dfa | ||
|
|
77ea03086c | ||
|
|
333d6f4e84 | ||
|
|
5c35074d85 | ||
|
|
36224e056a | ||
|
|
a18c568516 | ||
|
|
a5871f53ed | ||
|
|
8afa48f7f6 | ||
|
|
f6b62bdc4d | ||
|
|
b8b905c4ea | ||
|
|
9e13cd98af | ||
|
|
6b18eb4430 | ||
|
|
b01e953291 | ||
|
|
60af28c5dd | ||
|
|
8c5255b664 | ||
|
|
406f7bfa17 | ||
|
|
e0f11ae4a5 | ||
|
|
5e29d417fc | ||
|
|
3828dd819b | ||
|
|
4c838112dc | ||
|
|
b742cb2e4a | ||
|
|
a7d22c36db | ||
|
|
1b112840d2 | ||
|
|
593eeac19e | ||
|
|
d51a340019 | ||
|
|
9f23ff78da | ||
|
|
c576598a68 | ||
|
|
51a5da74cc | ||
|
|
797ee7812d | ||
|
|
670a8d9a1e | ||
|
|
eb9ddc8c2e | ||
|
|
49a683d871 | ||
|
|
bb228f3523 | ||
|
|
0b4d5ce5e3 | ||
|
|
e79ee48313 | ||
|
|
7301019d48 | ||
|
|
e0bf34dada | ||
|
|
96fe77c254 | ||
|
|
86af6b2f0e | ||
|
|
52aefd5086 | ||
|
|
f563676c09 | ||
|
|
e564bdd127 | ||
|
|
4e51621064 | ||
|
|
f4b1a9a527 | ||
|
|
829f2a82b0 | ||
|
|
b0460936c8 | ||
|
|
370bca32e6 | ||
|
|
38b7db5885 | ||
|
|
c80878d22a | ||
|
|
f8d0f72b27 | ||
|
|
6744273f0b | ||
|
|
4f00432ce1 | ||
|
|
392863fbf1 | ||
|
|
2faac70e63 | ||
|
|
3a5b0cbe7a | ||
|
|
787af4a106 | ||
|
|
d099535deb | ||
|
|
cb88ed912b | ||
|
|
6f6e956338 | ||
|
|
7036a7a60a | ||
|
|
660c8c1415 | ||
|
|
eda8c88b84 | ||
|
|
30f0240401 | ||
|
|
730b40dd5e | ||
|
|
f7b034a24b | ||
|
|
a0f48ee89d | ||
|
|
a071144a5c | ||
|
|
e46ac85d67 | ||
|
|
7e440520c9 | ||
|
|
9e5a429c8b | ||
|
|
d1bf5f7c9d | ||
|
|
7d84d2523a | ||
|
|
44dee1fe8c | ||
|
|
145cb6d08e | ||
|
|
29364145b2 | ||
|
|
3aefc7b66d | ||
|
|
428174f902 | ||
|
|
a19aa8b162 | ||
|
|
176aa55fd5 | ||
|
|
94b620a5ed | ||
|
|
8cef1ab2ac | ||
|
|
5279b9161b | ||
|
|
2be0fde3d6 | ||
|
|
9fd057b8c5 | ||
|
|
0f007fe009 | ||
|
|
8aaa4b7b5d | ||
|
|
2622b28c5c | ||
|
|
eb2c7e51c4 | ||
|
|
c3ccad7785 |
57
.ci/scripts/test_export_data_command.sh
Executable file
57
.ci/scripts/test_export_data_command.sh
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Test for the export-data admin command against sqlite and postgres
|
||||
|
||||
set -xe
|
||||
cd `dirname $0`/../..
|
||||
|
||||
echo "--- Install dependencies"
|
||||
|
||||
# Install dependencies for this test.
|
||||
pip install psycopg2
|
||||
|
||||
# Install Synapse itself. This won't update any libraries.
|
||||
pip install -e .
|
||||
|
||||
echo "--- Generate the signing key"
|
||||
|
||||
# Generate the server's signing key.
|
||||
python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
|
||||
|
||||
echo "--- Prepare test database"
|
||||
|
||||
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
|
||||
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
|
||||
|
||||
# Run the export-data command on the sqlite test database
|
||||
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
|
||||
--output-directory /tmp/export_data
|
||||
|
||||
# Test that the output directory exists and contains the rooms directory
|
||||
dir="/tmp/export_data/rooms"
|
||||
if [ -d "$dir" ]; then
|
||||
echo "Command successful, this test passes"
|
||||
else
|
||||
echo "No output directories found, the command fails against a sqlite database."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create the PostgreSQL database.
|
||||
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
|
||||
|
||||
# Port the SQLite databse to postgres so we can check command works against postgres
|
||||
echo "+++ Port SQLite3 databse to postgres"
|
||||
scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
|
||||
|
||||
# Run the export-data command on postgres database
|
||||
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
|
||||
--output-directory /tmp/export_data2
|
||||
|
||||
# Test that the output directory exists and contains the rooms directory
|
||||
dir2="/tmp/export_data2/rooms"
|
||||
if [ -d "$dir2" ]; then
|
||||
echo "Command successful, this test passes"
|
||||
else
|
||||
echo "No output directories found, the command fails against a postgres database."
|
||||
exit 1
|
||||
fi
|
||||
@@ -25,7 +25,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
|
||||
echo "--- Prepare test database"
|
||||
|
||||
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
|
||||
scripts-dev/update_database --database-config .ci/sqlite-config.yaml
|
||||
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
|
||||
|
||||
# Create the PostgreSQL database.
|
||||
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
|
||||
@@ -46,7 +46,7 @@ echo "--- Prepare empty SQLite database"
|
||||
# we do this by deleting the sqlite db, and then doing the same again.
|
||||
rm .ci/test_db.db
|
||||
|
||||
scripts-dev/update_database --database-config .ci/sqlite-config.yaml
|
||||
scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
|
||||
|
||||
# re-create the PostgreSQL database.
|
||||
.ci/scripts/postgres_exec.py \
|
||||
|
||||
2
.github/CODEOWNERS
vendored
Normal file
2
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
# Automatically request reviews from the synapse-core team when a pull request comes in.
|
||||
* @matrix-org/synapse-core
|
||||
58
.github/workflows/tests.yml
vendored
58
.github/workflows/tests.yml
vendored
@@ -76,22 +76,25 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.6", "3.7", "3.8", "3.9"]
|
||||
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
|
||||
database: ["sqlite"]
|
||||
toxenv: ["py"]
|
||||
include:
|
||||
# Newest Python without optional deps
|
||||
- python-version: "3.9"
|
||||
toxenv: "py-noextras,combine"
|
||||
- python-version: "3.10"
|
||||
toxenv: "py-noextras"
|
||||
|
||||
# Oldest Python with PostgreSQL
|
||||
- python-version: "3.6"
|
||||
database: "postgres"
|
||||
postgres-version: "9.6"
|
||||
toxenv: "py"
|
||||
|
||||
# Newest Python with PostgreSQL
|
||||
- python-version: "3.9"
|
||||
# Newest Python with newest PostgreSQL
|
||||
- python-version: "3.10"
|
||||
database: "postgres"
|
||||
postgres-version: "13"
|
||||
postgres-version: "14"
|
||||
toxenv: "py"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
@@ -111,7 +114,7 @@ jobs:
|
||||
if: ${{ matrix.postgres-version }}
|
||||
timeout-minutes: 2
|
||||
run: until pg_isready -h localhost; do sleep 1; done
|
||||
- run: tox -e py,combine
|
||||
- run: tox -e ${{ matrix.toxenv }}
|
||||
env:
|
||||
TRIAL_FLAGS: "--jobs=2"
|
||||
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
|
||||
@@ -119,6 +122,8 @@ jobs:
|
||||
SYNAPSE_POSTGRES_USER: postgres
|
||||
SYNAPSE_POSTGRES_PASSWORD: postgres
|
||||
- name: Dump logs
|
||||
# Logs are most useful when the command fails, always include them.
|
||||
if: ${{ always() }}
|
||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
||||
# This keeps logs colocated with failing jobs
|
||||
# It also ignores find's exit code; this is a best effort affair
|
||||
@@ -143,6 +148,8 @@ jobs:
|
||||
env:
|
||||
TRIAL_FLAGS: "--jobs=2"
|
||||
- name: Dump logs
|
||||
# Logs are most useful when the command fails, always include them.
|
||||
if: ${{ always() }}
|
||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
||||
# This keeps logs colocated with failing jobs
|
||||
# It also ignores find's exit code; this is a best effort affair
|
||||
@@ -169,10 +176,12 @@ jobs:
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- run: pip install tox
|
||||
- run: tox -e py,combine
|
||||
- run: tox -e py
|
||||
env:
|
||||
TRIAL_FLAGS: "--jobs=2"
|
||||
- name: Dump logs
|
||||
# Logs are most useful when the command fails, always include them.
|
||||
if: ${{ always() }}
|
||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
||||
# This keeps logs colocated with failing jobs
|
||||
# It also ignores find's exit code; this is a best effort affair
|
||||
@@ -244,6 +253,35 @@ jobs:
|
||||
/logs/results.tap
|
||||
/logs/**/*.log*
|
||||
|
||||
export-data:
|
||||
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
|
||||
needs: [linting-done, portdb]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
TOP: ${{ github.workspace }}
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres
|
||||
ports:
|
||||
- 5432:5432
|
||||
env:
|
||||
POSTGRES_PASSWORD: "postgres"
|
||||
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- run: sudo apt-get -qq install xmlsec1
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.9"
|
||||
- run: .ci/scripts/test_export_data_command.sh
|
||||
|
||||
portdb:
|
||||
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
|
||||
needs: linting-done
|
||||
@@ -256,8 +294,8 @@ jobs:
|
||||
- python-version: "3.6"
|
||||
postgres-version: "9.6"
|
||||
|
||||
- python-version: "3.9"
|
||||
postgres-version: "13"
|
||||
- python-version: "3.10"
|
||||
postgres-version: "14"
|
||||
|
||||
services:
|
||||
postgres:
|
||||
|
||||
2
.github/workflows/twisted_trunk.yml
vendored
2
.github/workflows/twisted_trunk.yml
vendored
@@ -33,6 +33,8 @@ jobs:
|
||||
TRIAL_FLAGS: "--jobs=2"
|
||||
|
||||
- name: Dump logs
|
||||
# Logs are most useful when the command fails, always include them.
|
||||
if: ${{ always() }}
|
||||
# Note: Dumps to workflow logs instead of using actions/upload-artifact
|
||||
# This keeps logs colocated with failing jobs
|
||||
# It also ignores find's exit code; this is a best effort affair
|
||||
|
||||
220
CHANGES.md
220
CHANGES.md
@@ -1,3 +1,223 @@
|
||||
Synapse 1.46.0 (2021-11-02)
|
||||
===========================
|
||||
|
||||
The cause of the [performance regression affecting Synapse 1.44](https://github.com/matrix-org/synapse/issues/11049) has been identified and fixed. ([\#11177](https://github.com/matrix-org/synapse/issues/11177))
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a bug introduced in v1.46.0rc1 where URL previews of some XML documents would fail. ([\#11196](https://github.com/matrix-org/synapse/issues/11196))
|
||||
|
||||
|
||||
Synapse 1.46.0rc1 (2021-10-27)
|
||||
==============================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Add support for Ubuntu 21.10 "Impish Indri". ([\#11024](https://github.com/matrix-org/synapse/issues/11024))
|
||||
- Port the Password Auth Providers module interface to the new generic interface. ([\#10548](https://github.com/matrix-org/synapse/issues/10548), [\#11180](https://github.com/matrix-org/synapse/issues/11180))
|
||||
- Experimental support for the thread relation defined in [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11088](https://github.com/matrix-org/synapse/issues/11088), [\#11181](https://github.com/matrix-org/synapse/issues/11181), [\#11192](https://github.com/matrix-org/synapse/issues/11192))
|
||||
- Users admin API can now also modify user type in addition to allowing it to be set on user creation. ([\#11174](https://github.com/matrix-org/synapse/issues/11174))
|
||||
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Newly-created public rooms are now only assigned an alias if the room's creation has not been blocked by permission settings. Contributed by @AndrewFerr. ([\#10930](https://github.com/matrix-org/synapse/issues/10930))
|
||||
- Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. ([\#11001](https://github.com/matrix-org/synapse/issues/11001), [\#11009](https://github.com/matrix-org/synapse/issues/11009), [\#11012](https://github.com/matrix-org/synapse/issues/11012))
|
||||
- Fix 500 error on `/messages` when the server accumulates more than 5 backwards extremities at a given depth for a room. ([\#11027](https://github.com/matrix-org/synapse/issues/11027))
|
||||
- Fix a bug where setting a user's `external_id` via the admin API returns 500 and deletes user's existing external mappings if that external ID is already mapped. ([\#11051](https://github.com/matrix-org/synapse/issues/11051))
|
||||
- Fix a long-standing bug where users excluded from the user directory were added into the directory if they belonged to a room which became public or private. ([\#11075](https://github.com/matrix-org/synapse/issues/11075))
|
||||
- Fix a long-standing bug when attempting to preview URLs which are in the `windows-1252` character encoding. ([\#11077](https://github.com/matrix-org/synapse/issues/11077), [\#11089](https://github.com/matrix-org/synapse/issues/11089))
|
||||
- Fix broken export-data admin command and add test script checking the command to CI. ([\#11078](https://github.com/matrix-org/synapse/issues/11078))
|
||||
- Show an error when timestamp in seconds is provided to the `/purge_media_cache` Admin API. ([\#11101](https://github.com/matrix-org/synapse/issues/11101))
|
||||
- Fix local users who left all their rooms being removed from the user directory, even if the `search_all_users` config option was enabled. ([\#11103](https://github.com/matrix-org/synapse/issues/11103))
|
||||
- Fix a bug which caused the module API's `get_user_ip_and_agents` function to always fail on workers. `get_user_ip_and_agents` was introduced in 1.44.0 and did not function correctly on worker processes at the time. ([\#11112](https://github.com/matrix-org/synapse/issues/11112))
|
||||
- Identity server connection is no longer ignoring `ip_range_whitelist`. ([\#11120](https://github.com/matrix-org/synapse/issues/11120))
|
||||
- Fix a bug introduced in Synapse 1.45.0 breaking the configuration file parsing script. ([\#11145](https://github.com/matrix-org/synapse/issues/11145))
|
||||
- Fix a performance regression introduced in 1.44.0 which could cause client requests to time out when making large numbers of outbound requests. ([\#11177](https://github.com/matrix-org/synapse/issues/11177), [\#11190](https://github.com/matrix-org/synapse/issues/11190))
|
||||
- Resolve and share `state_groups` for all [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical events in batch. ([\#10975](https://github.com/matrix-org/synapse/issues/10975))
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Fix broken links relating to module API deprecation in the upgrade notes. ([\#11069](https://github.com/matrix-org/synapse/issues/11069))
|
||||
- Add more information about what happens when a user is deactivated. ([\#11083](https://github.com/matrix-org/synapse/issues/11083))
|
||||
- Clarify the the sample log config can be copied from the documentation without issue. ([\#11092](https://github.com/matrix-org/synapse/issues/11092))
|
||||
- Update the admin API documentation with an updated list of the characters allowed in registration tokens. ([\#11093](https://github.com/matrix-org/synapse/issues/11093))
|
||||
- Document Synapse's behaviour when dealing with multiple modules registering the same callbacks and/or handlers for the same HTTP endpoints. ([\#11096](https://github.com/matrix-org/synapse/issues/11096))
|
||||
- Fix instances of `[example]{.title-ref}` in the upgrade documentation as a result of prior RST to Markdown conversion. ([\#11118](https://github.com/matrix-org/synapse/issues/11118))
|
||||
- Document the version of Synapse each module callback was introduced in. ([\#11132](https://github.com/matrix-org/synapse/issues/11132))
|
||||
- Document the version of Synapse that introduced each module API method. ([\#11183](https://github.com/matrix-org/synapse/issues/11183))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
- Fix spurious warnings about losing the logging context on the `ReplicationCommandHandler` when losing the replication connection. ([\#10984](https://github.com/matrix-org/synapse/issues/10984))
|
||||
- Include rejected status when we log events. ([\#11008](https://github.com/matrix-org/synapse/issues/11008))
|
||||
- Add some extra logging to the event persistence code. ([\#11014](https://github.com/matrix-org/synapse/issues/11014))
|
||||
- Rearrange the internal workings of the incremental user directory updates. ([\#11035](https://github.com/matrix-org/synapse/issues/11035))
|
||||
- Fix a long-standing bug where users excluded from the directory could still be added to the `users_who_share_private_rooms` table after a regular user joins a private room. ([\#11143](https://github.com/matrix-org/synapse/issues/11143))
|
||||
- Add and improve type hints. ([\#10972](https://github.com/matrix-org/synapse/issues/10972), [\#11055](https://github.com/matrix-org/synapse/issues/11055), [\#11066](https://github.com/matrix-org/synapse/issues/11066), [\#11076](https://github.com/matrix-org/synapse/issues/11076), [\#11095](https://github.com/matrix-org/synapse/issues/11095), [\#11109](https://github.com/matrix-org/synapse/issues/11109), [\#11121](https://github.com/matrix-org/synapse/issues/11121), [\#11146](https://github.com/matrix-org/synapse/issues/11146))
|
||||
- Mark the Synapse package as containing type annotations and fix export declarations so that Synapse pluggable modules may be type checked against Synapse. ([\#11054](https://github.com/matrix-org/synapse/issues/11054))
|
||||
- Remove dead code from `MediaFilePaths`. ([\#11056](https://github.com/matrix-org/synapse/issues/11056))
|
||||
- Be more lenient when parsing oEmbed response versions. ([\#11065](https://github.com/matrix-org/synapse/issues/11065))
|
||||
- Create a separate module for the retention configuration. ([\#11070](https://github.com/matrix-org/synapse/issues/11070))
|
||||
- Clean up some of the federation event authentication code for clarity. ([\#11115](https://github.com/matrix-org/synapse/issues/11115), [\#11116](https://github.com/matrix-org/synapse/issues/11116), [\#11122](https://github.com/matrix-org/synapse/issues/11122))
|
||||
- Add docstrings and comments to the application service ephemeral event sending code. ([\#11138](https://github.com/matrix-org/synapse/issues/11138))
|
||||
- Update the `sign_json` script to support inline configuration of the signing key. ([\#11139](https://github.com/matrix-org/synapse/issues/11139))
|
||||
- Fix broken link in the docker image README. ([\#11144](https://github.com/matrix-org/synapse/issues/11144))
|
||||
- Always dump logs from unit tests during CI runs. ([\#11068](https://github.com/matrix-org/synapse/issues/11068))
|
||||
- Add tests for `MediaFilePaths` class. ([\#11057](https://github.com/matrix-org/synapse/issues/11057))
|
||||
- Simplify the user admin API tests. ([\#11048](https://github.com/matrix-org/synapse/issues/11048))
|
||||
- Add a test for the workaround introduced in [\#11042](https://github.com/matrix-org/synapse/pull/11042) concerning the behaviour of third-party rule modules and `SynapseError`s. ([\#11071](https://github.com/matrix-org/synapse/issues/11071))
|
||||
|
||||
|
||||
Synapse 1.45.1 (2021-10-20)
|
||||
===========================
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Revert change to counting of deactivated users towards the monthly active users limit, introduced in 1.45.0rc1. ([\#11127](https://github.com/matrix-org/synapse/issues/11127))
|
||||
|
||||
|
||||
Synapse 1.45.0 (2021-10-19)
|
||||
===========================
|
||||
|
||||
No functional changes since Synapse 1.45.0rc2.
|
||||
|
||||
Known Issues
|
||||
------------
|
||||
|
||||
- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of 1.44.0 remains unresolved.
|
||||
|
||||
We have not been able to identify a probable cause. Affected users report that setting up a federation sender worker appears to alleviate symptoms of the regression.
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Reword changelog to clarify concerns about a suspected performance regression in 1.44.0. ([\#11117](https://github.com/matrix-org/synapse/issues/11117))
|
||||
|
||||
|
||||
Synapse 1.45.0rc2 (2021-10-14)
|
||||
==============================
|
||||
|
||||
This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) a user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1.
|
||||
|
||||
Known Issues
|
||||
------------
|
||||
|
||||
- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of 1.44.0 remains unresolved.
|
||||
|
||||
We have not been able to identify a probable cause. Affected users report that setting up a federation sender worker appears to alleviate symptoms of the regression.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a long-standing bug when using multiple event persister workers where events were not correctly sent down `/sync` due to a race. ([\#11045](https://github.com/matrix-org/synapse/issues/11045))
|
||||
- Fix a bug introduced in Synapse 1.45.0rc1 where the user directory would stop updating if it processed an event from a
|
||||
user not in the `users` table. ([\#11053](https://github.com/matrix-org/synapse/issues/11053))
|
||||
- Fix a bug introduced in Synapse 1.44.0 when logging errors during oEmbed processing. ([\#11061](https://github.com/matrix-org/synapse/issues/11061))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Add an 'approximate difference' method to `StateFilter`. ([\#10825](https://github.com/matrix-org/synapse/issues/10825))
|
||||
- Fix inconsistent behavior of `get_last_client_by_ip` when reporting data that has not been stored in the database yet. ([\#10970](https://github.com/matrix-org/synapse/issues/10970))
|
||||
- Fix a bug introduced in Synapse 1.21.0 that causes opentracing and Prometheus metrics for replication requests to be measured incorrectly. ([\#10996](https://github.com/matrix-org/synapse/issues/10996))
|
||||
- Ensure that cache config tests do not share state. ([\#11036](https://github.com/matrix-org/synapse/issues/11036))
|
||||
|
||||
|
||||
Synapse 1.45.0rc1 (2021-10-12)
|
||||
==============================
|
||||
|
||||
**Note:** Media storage providers module that read from Synapse's configuration need changes as of this version, see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1450) for more information.
|
||||
|
||||
Known Issues
|
||||
------------
|
||||
|
||||
- We are investigating [a performance issue](https://github.com/matrix-org/synapse/issues/11049) which was reported after the release of 1.44.0.
|
||||
- We are aware of [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. A second release candidate is expected which will resolve this.
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Add [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069) support to `/account/whoami`. ([\#9655](https://github.com/matrix-org/synapse/issues/9655))
|
||||
- Support autodiscovery of oEmbed previews. ([\#10822](https://github.com/matrix-org/synapse/issues/10822))
|
||||
- Add a `user_may_send_3pid_invite` spam checker callback for modules to allow or deny 3PID invites. ([\#10894](https://github.com/matrix-org/synapse/issues/10894))
|
||||
- Add a spam checker callback to allow or deny room joins. ([\#10910](https://github.com/matrix-org/synapse/issues/10910))
|
||||
- Include an `update_synapse_database` script in the distribution. Contributed by @Fizzadar at Beeper. ([\#10954](https://github.com/matrix-org/synapse/issues/10954))
|
||||
- Include exception information in JSON logging output. Contributed by @Fizzadar at Beeper. ([\#11028](https://github.com/matrix-org/synapse/issues/11028))
|
||||
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki. ([\#10922](https://github.com/matrix-org/synapse/issues/10922))
|
||||
- Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory. ([\#10924](https://github.com/matrix-org/synapse/issues/10924))
|
||||
- Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8 and 9 could be applied to earlier room versions in some situations. ([\#10927](https://github.com/matrix-org/synapse/issues/10927))
|
||||
- Fix a long-standing bug wherein deactivated users still count towards the monthly active users limit. ([\#10947](https://github.com/matrix-org/synapse/issues/10947))
|
||||
- Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. ([\#10956](https://github.com/matrix-org/synapse/issues/10956))
|
||||
- Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and deactivated users. ([\#10960](https://github.com/matrix-org/synapse/issues/10960))
|
||||
- Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches with unknown batch ID error in existing room versions from the room creator. ([\#10962](https://github.com/matrix-org/synapse/issues/10962))
|
||||
- Fix a bug that could leak local users' per-room nicknames and avatars when the user directory is rebuilt. ([\#10981](https://github.com/matrix-org/synapse/issues/10981))
|
||||
- Fix a long-standing bug where the remainder of a batch of user directory changes would be silently dropped if the server left a room early in the batch. ([\#10982](https://github.com/matrix-org/synapse/issues/10982))
|
||||
- Correct a bugfix introduced in Synapse v1.44.0 that would catch the wrong error if a connection is lost before a response could be written to it. ([\#10995](https://github.com/matrix-org/synapse/issues/10995))
|
||||
- Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see you in the user directory. ([\#11002](https://github.com/matrix-org/synapse/issues/11002))
|
||||
- Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public. ([\#11003](https://github.com/matrix-org/synapse/issues/11003))
|
||||
- Work around a regression, introduced in Synapse v1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. ([\#11042](https://github.com/matrix-org/synapse/issues/11042))
|
||||
- Fix a bug in [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms that could cause cross-talk/conflicts between batches. ([\#10877](https://github.com/matrix-org/synapse/issues/10877))
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Change wording ("reference homeserver") in Synapse repository documentation. Contributed by @maxkratz. ([\#10971](https://github.com/matrix-org/synapse/issues/10971))
|
||||
- Fix a dead URL in development documentation (SAML) and change wording from "Riot" to "Element". Contributed by @maxkratz. ([\#10973](https://github.com/matrix-org/synapse/issues/10973))
|
||||
- Add additional content to the Welcome and Overview page of the documentation. ([\#10990](https://github.com/matrix-org/synapse/issues/10990))
|
||||
- Update links to MSCs in documentation. Contributed by @dklimpel. ([\#10991](https://github.com/matrix-org/synapse/issues/10991))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Improve type hinting in `synapse.util`. ([\#10888](https://github.com/matrix-org/synapse/issues/10888))
|
||||
- Add further type hints to `synapse.storage.util`. ([\#10892](https://github.com/matrix-org/synapse/issues/10892))
|
||||
- Fix type hints to be compatible with an upcoming change to Twisted. ([\#10895](https://github.com/matrix-org/synapse/issues/10895))
|
||||
- Update utility code to handle C implementations of frozendict. ([\#10902](https://github.com/matrix-org/synapse/issues/10902))
|
||||
- Drop old functionality which maintained database compatibility with Synapse versions before v1.31. ([\#10903](https://github.com/matrix-org/synapse/issues/10903))
|
||||
- Clean-up configuration helper classes for the `ServerConfig` class. ([\#10915](https://github.com/matrix-org/synapse/issues/10915))
|
||||
- Use direct references to config flags. ([\#10916](https://github.com/matrix-org/synapse/issues/10916), [\#10959](https://github.com/matrix-org/synapse/issues/10959), [\#10985](https://github.com/matrix-org/synapse/issues/10985))
|
||||
- Clean up some of the federation event authentication code for clarity. ([\#10926](https://github.com/matrix-org/synapse/issues/10926), [\#10940](https://github.com/matrix-org/synapse/issues/10940), [\#10986](https://github.com/matrix-org/synapse/issues/10986), [\#10987](https://github.com/matrix-org/synapse/issues/10987), [\#10988](https://github.com/matrix-org/synapse/issues/10988), [\#11010](https://github.com/matrix-org/synapse/issues/11010), [\#11011](https://github.com/matrix-org/synapse/issues/11011))
|
||||
- Refactor various parts of the codebase to use `RoomVersion` objects instead of room version identifier strings. ([\#10934](https://github.com/matrix-org/synapse/issues/10934))
|
||||
- Refactor user directory tests in preparation for upcoming changes. ([\#10935](https://github.com/matrix-org/synapse/issues/10935))
|
||||
- Include the event id in the logcontext when handling PDUs received over federation. ([\#10936](https://github.com/matrix-org/synapse/issues/10936))
|
||||
- Fix logged errors in unit tests. ([\#10939](https://github.com/matrix-org/synapse/issues/10939))
|
||||
- Fix a broken test to ensure that consent configuration works during registration. ([\#10945](https://github.com/matrix-org/synapse/issues/10945))
|
||||
- Add type hints to filtering classes. ([\#10958](https://github.com/matrix-org/synapse/issues/10958))
|
||||
- Add type-hint to `HomeserverTestcase.setup_test_homeserver`. ([\#10961](https://github.com/matrix-org/synapse/issues/10961))
|
||||
- Fix the test utility function `create_room_as` so that `is_public=True` will explicitly set the `visibility` parameter of room creation requests to `public`. Contributed by @AndrewFerr. ([\#10963](https://github.com/matrix-org/synapse/issues/10963))
|
||||
- Make the release script more robust and transparent. ([\#10966](https://github.com/matrix-org/synapse/issues/10966))
|
||||
- Refactor [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` mega function into smaller handler functions. ([\#10974](https://github.com/matrix-org/synapse/issues/10974))
|
||||
- Log stack traces when a missing opentracing span is detected. ([\#10983](https://github.com/matrix-org/synapse/issues/10983))
|
||||
- Update GHA config to run tests against Python 3.10 and PostgreSQL 14. ([\#10992](https://github.com/matrix-org/synapse/issues/10992))
|
||||
- Fix a long-standing bug where `ReadWriteLock`s could drop logging contexts on exit. ([\#10993](https://github.com/matrix-org/synapse/issues/10993))
|
||||
- Add a `CODEOWNERS` file to automatically request reviews from the `@matrix-org/synapse-core` team on new pull requests. ([\#10994](https://github.com/matrix-org/synapse/issues/10994))
|
||||
- Add further type hints to `synapse.state`. ([\#11004](https://github.com/matrix-org/synapse/issues/11004))
|
||||
- Remove the deprecated `BaseHandler` object. ([\#11005](https://github.com/matrix-org/synapse/issues/11005))
|
||||
- Bump mypy version for CI to 0.910, and pull in new type stubs for dependencies. ([\#11006](https://github.com/matrix-org/synapse/issues/11006))
|
||||
- Fix CI to run the unit tests without optional deps. ([\#11017](https://github.com/matrix-org/synapse/issues/11017))
|
||||
- Ensure that cache config tests do not share state. ([\#11019](https://github.com/matrix-org/synapse/issues/11019))
|
||||
- Add additional type hints to `synapse.server_notices`. ([\#11021](https://github.com/matrix-org/synapse/issues/11021))
|
||||
- Add additional type hints for `synapse.push`. ([\#11023](https://github.com/matrix-org/synapse/issues/11023))
|
||||
- When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing. ([\#11034](https://github.com/matrix-org/synapse/issues/11034))
|
||||
- Remove unnecessary list comprehension from `synapse_port_db` to satisfy code style requirements. ([\#11043](https://github.com/matrix-org/synapse/issues/11043))
|
||||
|
||||
|
||||
Synapse 1.44.0 (2021-10-05)
|
||||
===========================
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ include demo/demo.tls.dh
|
||||
include demo/*.py
|
||||
include demo/*.sh
|
||||
|
||||
include synapse/py.typed
|
||||
recursive-include synapse/storage *.sql
|
||||
recursive-include synapse/storage *.sql.postgres
|
||||
recursive-include synapse/storage *.sql.sqlite
|
||||
|
||||
@@ -55,11 +55,8 @@ solutions. The hope is for Matrix to act as the building blocks for a new
|
||||
generation of fully open and interoperable messaging and VoIP apps for the
|
||||
internet.
|
||||
|
||||
Synapse is a reference "homeserver" implementation of Matrix from the core
|
||||
development team at matrix.org, written in Python/Twisted. It is intended to
|
||||
showcase the concept of Matrix and let folks see the spec in the context of a
|
||||
codebase and let you run your own homeserver and generally help bootstrap the
|
||||
ecosystem.
|
||||
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
|
||||
team, written in Python 3/Twisted.
|
||||
|
||||
In Matrix, every user runs one or more Matrix clients, which connect through to
|
||||
a Matrix homeserver. The homeserver stores all their personal chat history and
|
||||
@@ -301,7 +298,7 @@ to install using pip and a virtualenv::
|
||||
|
||||
python3 -m venv ./env
|
||||
source ./env/bin/activate
|
||||
pip install -e ".[all,test]"
|
||||
pip install -e ".[all,dev]"
|
||||
|
||||
This will run a process of downloading and installing all the needed
|
||||
dependencies into a virtual env. If any dependencies fail to install,
|
||||
|
||||
44
debian/changelog
vendored
44
debian/changelog
vendored
@@ -1,3 +1,47 @@
|
||||
matrix-synapse-py3 (1.46.0) stable; urgency=medium
|
||||
|
||||
[ Richard van der Hoff ]
|
||||
* Compress debs with xz, to fix incompatibility of impish debs with reprepro.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.46.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 02 Nov 2021 13:22:53 +0000
|
||||
|
||||
matrix-synapse-py3 (1.46.0~rc1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.46.0~rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Oct 2021 14:04:04 +0100
|
||||
|
||||
matrix-synapse-py3 (1.45.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.45.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Oct 2021 11:58:27 +0100
|
||||
|
||||
matrix-synapse-py3 (1.45.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.45.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Oct 2021 11:18:53 +0100
|
||||
|
||||
matrix-synapse-py3 (1.45.0~rc2) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.45.0~rc2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 14 Oct 2021 10:58:24 +0100
|
||||
|
||||
matrix-synapse-py3 (1.45.0~rc1) stable; urgency=medium
|
||||
|
||||
[ Nick @ Beeper ]
|
||||
* Include an `update_synapse_database` script in the distribution.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.45.0~rc1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Oct 2021 10:46:27 +0100
|
||||
|
||||
matrix-synapse-py3 (1.44.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.44.0.
|
||||
|
||||
1
debian/matrix-synapse-py3.links
vendored
1
debian/matrix-synapse-py3.links
vendored
@@ -3,3 +3,4 @@ opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matri
|
||||
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
|
||||
opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups
|
||||
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
|
||||
opt/venvs/matrix-synapse/bin/update_synapse_database usr/bin/update_synapse_database
|
||||
|
||||
6
debian/rules
vendored
6
debian/rules
vendored
@@ -51,5 +51,11 @@ override_dh_shlibdeps:
|
||||
override_dh_virtualenv:
|
||||
./debian/build_virtualenv
|
||||
|
||||
override_dh_builddeb:
|
||||
# force the compression to xzip, to stop dpkg-deb on impish defaulting to zstd
|
||||
# (which requires reprepro 5.3.0-1.3, which is currently only in 'experimental' in Debian:
|
||||
# https://metadata.ftp-master.debian.org/changelogs/main/r/reprepro/reprepro_5.3.0-1.3_changelog)
|
||||
dh_builddeb -- -Zxz
|
||||
|
||||
%:
|
||||
dh $@ --with python-virtualenv
|
||||
|
||||
@@ -226,4 +226,5 @@ healthcheck:
|
||||
## Using jemalloc
|
||||
|
||||
Jemalloc is embedded in the image and will be used instead of the default allocator.
|
||||
You can read about jemalloc by reading the Synapse [README](../README.rst).
|
||||
You can read about jemalloc by reading the Synapse
|
||||
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
## Historical Note
|
||||
This document was originally written to guide server admins through the upgrade
|
||||
path towards Synapse 1.0. Specifically,
|
||||
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
|
||||
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md)
|
||||
required that all servers present valid TLS certificates on their federation
|
||||
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
|
||||
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
|
||||
@@ -282,7 +282,7 @@ coffin of the Perspectives project (which was already pretty dead). So, the
|
||||
Spec Core Team decided that a better approach would be to mandate valid TLS
|
||||
certificates for federation alongside the rest of the Web. More details can be
|
||||
found in
|
||||
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
|
||||
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
|
||||
|
||||
This results in a breaking change, which is disruptive, but absolutely critical
|
||||
for the security model. However, the existence of Let's Encrypt as a trivial
|
||||
|
||||
@@ -6,9 +6,9 @@ Please update any links to point to the new website instead.
|
||||
## About
|
||||
|
||||
This directory currently holds a series of markdown files documenting how to install, use
|
||||
and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
|
||||
from this repository, but it is recommended to instead browse through the
|
||||
[website](https://matrix-org.github.io/synapse) for easier discoverability.
|
||||
and develop Synapse. The documentation is readable directly from this repository, but it is
|
||||
recommended to instead browse through the [website](https://matrix-org.github.io/synapse) for
|
||||
easier discoverability.
|
||||
|
||||
## Adding to the documentation
|
||||
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
- [Third-party rules callbacks](modules/third_party_rules_callbacks.md)
|
||||
- [Presence router callbacks](modules/presence_router_callbacks.md)
|
||||
- [Account validity callbacks](modules/account_validity_callbacks.md)
|
||||
- [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
|
||||
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
|
||||
- [Workers](workers.md)
|
||||
- [Using `synctl` with Workers](synctl_workers.md)
|
||||
|
||||
@@ -257,9 +257,9 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
|
||||
URL Parameters
|
||||
|
||||
* `server_name`: string - The name of your local server (e.g `matrix.org`).
|
||||
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
|
||||
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
|
||||
Files that were last used before this timestamp will be deleted. It is the timestamp of
|
||||
last access and not the timestamp creation.
|
||||
last access, not the timestamp when the file was created.
|
||||
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
|
||||
Files that are larger will be deleted. Defaults to `0`.
|
||||
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
|
||||
@@ -302,7 +302,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
|
||||
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
|
||||
All cached media that was last accessed before this timestamp will be removed.
|
||||
|
||||
Response:
|
||||
|
||||
@@ -50,7 +50,8 @@ It returns a JSON body like the following:
|
||||
"auth_provider": "<provider2>",
|
||||
"external_id": "<user_id_provider_2>"
|
||||
}
|
||||
]
|
||||
],
|
||||
"user_type": null
|
||||
}
|
||||
```
|
||||
|
||||
@@ -97,7 +98,8 @@ with a body of:
|
||||
],
|
||||
"avatar_url": "<avatar_url>",
|
||||
"admin": false,
|
||||
"deactivated": false
|
||||
"deactivated": false,
|
||||
"user_type": null
|
||||
}
|
||||
```
|
||||
|
||||
@@ -135,6 +137,9 @@ Body parameters:
|
||||
unchanged on existing accounts and set to `false` for new accounts.
|
||||
A user cannot be erased by deactivating with this API. For details on
|
||||
deactivating users see [Deactivate Account](#deactivate-account).
|
||||
- `user_type` - string or null, optional. If provided, the user type will be
|
||||
adjusted. If `null` given, the user type will be cleared. Other
|
||||
allowed options are: `bot` and `support`.
|
||||
|
||||
If the user already exists then optional parameters default to the current value.
|
||||
|
||||
@@ -341,6 +346,7 @@ The following actions are performed when deactivating an user:
|
||||
- Remove all 3PIDs from the homeserver
|
||||
- Delete all devices and E2EE keys
|
||||
- Delete all access tokens
|
||||
- Delete all pushers
|
||||
- Delete the password hash
|
||||
- Removal from all rooms the user is a member of
|
||||
- Remove the user from the user directory
|
||||
@@ -354,6 +360,15 @@ is set to `true`:
|
||||
- Remove the user's avatar URL
|
||||
- Mark the user as erased
|
||||
|
||||
The following actions are **NOT** performed. The list may be incomplete.
|
||||
|
||||
- Remove mappings of SSO IDs
|
||||
- [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (included avatar images)
|
||||
- Delete sent and received messages
|
||||
- Delete E2E cross-signing keys
|
||||
- Remove the user's creation (registration) timestamp
|
||||
- [Remove rate limit overrides](#override-ratelimiting-for-users)
|
||||
- Remove from monthly active users
|
||||
|
||||
## Reset password
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ setup a *virtualenv*, as follows:
|
||||
cd path/where/you/have/cloned/the/repository
|
||||
python3 -m venv ./env
|
||||
source ./env/bin/activate
|
||||
pip install -e ".[all,lint,mypy,test]"
|
||||
pip install -e ".[all,dev]"
|
||||
pip install tox
|
||||
```
|
||||
|
||||
@@ -63,7 +63,7 @@ TBD
|
||||
|
||||
# 5. Get in touch.
|
||||
|
||||
Join our developer community on Matrix: #synapse-dev:matrix.org !
|
||||
Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!
|
||||
|
||||
|
||||
# 6. Pick an issue.
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
# How to test SAML as a developer without a server
|
||||
|
||||
https://capriza.github.io/samling/samling.html (https://github.com/capriza/samling) is a great
|
||||
resource for being able to tinker with the SAML options within Synapse without needing to
|
||||
deploy and configure a complicated software stack.
|
||||
https://fujifish.github.io/samling/samling.html (https://github.com/fujifish/samling) is a great resource for being able to tinker with the
|
||||
SAML options within Synapse without needing to deploy and configure a complicated software stack.
|
||||
|
||||
To make Synapse (and therefore Riot) use it:
|
||||
To make Synapse (and therefore Element) use it:
|
||||
|
||||
1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab.
|
||||
2. Copy the XML to your clipboard.
|
||||
@@ -26,9 +25,9 @@ To make Synapse (and therefore Riot) use it:
|
||||
the dependencies are installed and ready to go.
|
||||
7. Restart Synapse.
|
||||
|
||||
Then in Riot:
|
||||
Then in Element:
|
||||
|
||||
1. Visit the login page with a Riot pointing at your homeserver.
|
||||
1. Visit the login page and point Element towards your homeserver using the `public_baseurl` above.
|
||||
2. Click the Single Sign-On button.
|
||||
3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`.
|
||||
The response must also be signed.
|
||||
|
||||
@@ -9,6 +9,8 @@ The available account validity callbacks are:
|
||||
|
||||
### `is_user_expired`
|
||||
|
||||
_First introduced in Synapse v1.39.0_
|
||||
|
||||
```python
|
||||
async def is_user_expired(user: str) -> Optional[bool]
|
||||
```
|
||||
@@ -22,8 +24,15 @@ If the module returns `True`, the current request will be denied with the error
|
||||
`ORG_MATRIX_EXPIRED_ACCOUNT` and the HTTP status code 403. Note that this doesn't
|
||||
invalidate the user's access token.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `None`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `None` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `on_user_registration`
|
||||
|
||||
_First introduced in Synapse v1.39.0_
|
||||
|
||||
```python
|
||||
async def on_user_registration(user: str) -> None
|
||||
```
|
||||
@@ -31,3 +40,5 @@ async def on_user_registration(user: str) -> None
|
||||
Called after successfully registering a user, in case the module needs to perform extra
|
||||
operations to keep track of them. (e.g. add them to a database table). The user is
|
||||
represented by their Matrix user ID.
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
Synapse supports extending its functionality by configuring external modules.
|
||||
|
||||
**Note**: When using third-party modules, you effectively allow someone else to run
|
||||
custom code on your Synapse homeserver. Server admins are encouraged to verify the
|
||||
provenance of the modules they use on their homeserver and make sure the modules aren't
|
||||
running malicious code on their instance.
|
||||
|
||||
## Using modules
|
||||
|
||||
To use a module on Synapse, add it to the `modules` section of the configuration file:
|
||||
@@ -18,17 +23,31 @@ modules:
|
||||
Each module is defined by a path to a Python class as well as a configuration. This
|
||||
information for a given module should be available in the module's own documentation.
|
||||
|
||||
**Note**: When using third-party modules, you effectively allow someone else to run
|
||||
custom code on your Synapse homeserver. Server admins are encouraged to verify the
|
||||
provenance of the modules they use on their homeserver and make sure the modules aren't
|
||||
running malicious code on their instance.
|
||||
## Using multiple modules
|
||||
|
||||
Also note that we are currently in the process of migrating module interfaces to this
|
||||
system. While some interfaces might be compatible with it, others still require
|
||||
configuring modules in another part of Synapse's configuration file.
|
||||
The order in which modules are listed in this section is important. When processing an
|
||||
action that can be handled by several modules, Synapse will always prioritise the module
|
||||
that appears first (i.e. is the highest in the list). This means:
|
||||
|
||||
* If several modules register the same callback, the callback registered by the module
|
||||
that appears first is used.
|
||||
* If several modules try to register a handler for the same HTTP path, only the handler
|
||||
registered by the module that appears first is used. Handlers registered by the other
|
||||
module(s) are ignored and Synapse will log a warning message about them.
|
||||
|
||||
Note that Synapse doesn't allow multiple modules implementing authentication checkers via
|
||||
the password auth provider feature for the same login type with different fields. If this
|
||||
happens, Synapse will refuse to start.
|
||||
|
||||
## Current status
|
||||
|
||||
We are currently in the process of migrating module interfaces to this system. While some
|
||||
interfaces might be compatible with it, others still require configuring modules in
|
||||
another part of Synapse's configuration file.
|
||||
|
||||
Currently, only the following pre-existing interfaces are compatible with this new system:
|
||||
|
||||
* spam checker
|
||||
* third-party rules
|
||||
* presence router
|
||||
* password auth providers
|
||||
|
||||
176
docs/modules/password_auth_provider_callbacks.md
Normal file
176
docs/modules/password_auth_provider_callbacks.md
Normal file
@@ -0,0 +1,176 @@
|
||||
# Password auth provider callbacks
|
||||
|
||||
Password auth providers offer a way for server administrators to integrate
|
||||
their Synapse installation with an external authentication system. The callbacks can be
|
||||
registered by using the Module API's `register_password_auth_provider_callbacks` method.
|
||||
|
||||
## Callbacks
|
||||
|
||||
### `auth_checkers`
|
||||
|
||||
_First introduced in Synapse v1.46.0_
|
||||
|
||||
```
|
||||
auth_checkers: Dict[Tuple[str,Tuple], Callable]
|
||||
```
|
||||
|
||||
A dict mapping from tuples of a login type identifier (such as `m.login.password`) and a
|
||||
tuple of field names (such as `("password", "secret_thing")`) to authentication checking
|
||||
callbacks, which should be of the following form:
|
||||
|
||||
```python
|
||||
async def check_auth(
|
||||
user: str,
|
||||
login_type: str,
|
||||
login_dict: "synapse.module_api.JsonDict",
|
||||
) -> Optional[
|
||||
Tuple[
|
||||
str,
|
||||
Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]]
|
||||
]
|
||||
]
|
||||
```
|
||||
|
||||
The login type and field names should be provided by the user in the
|
||||
request to the `/login` API. [The Matrix specification](https://matrix.org/docs/spec/client_server/latest#authentication-types)
|
||||
defines some types, however user defined ones are also allowed.
|
||||
|
||||
The callback is passed the `user` field provided by the client (which might not be in
|
||||
`@username:server` form), the login type, and a dictionary of login secrets passed by
|
||||
the client.
|
||||
|
||||
If the authentication is successful, the module must return the user's Matrix ID (e.g.
|
||||
`@alice:example.com`) and optionally a callback to be called with the response to the
|
||||
`/login` request. If the module doesn't wish to return a callback, it must return `None`
|
||||
instead.
|
||||
|
||||
If the authentication is unsuccessful, the module must return `None`.
|
||||
|
||||
If multiple modules register an auth checker for the same login type but with different
|
||||
fields, Synapse will refuse to start.
|
||||
|
||||
If multiple modules register an auth checker for the same login type with the same fields,
|
||||
then the callbacks will be executed in order, until one returns a Matrix User ID (and
|
||||
optionally a callback). In that case, the return value of that callback will be accepted
|
||||
and subsequent callbacks will not be fired. If every callback returns `None`, then the
|
||||
authentication fails.
|
||||
|
||||
### `check_3pid_auth`
|
||||
|
||||
_First introduced in Synapse v1.46.0_
|
||||
|
||||
```python
|
||||
async def check_3pid_auth(
|
||||
medium: str,
|
||||
address: str,
|
||||
password: str,
|
||||
) -> Optional[
|
||||
Tuple[
|
||||
str,
|
||||
Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]]
|
||||
]
|
||||
]
|
||||
```
|
||||
|
||||
Called when a user attempts to register or log in with a third party identifier,
|
||||
such as email. It is passed the medium (eg. `email`), an address (eg. `jdoe@example.com`)
|
||||
and the user's password.
|
||||
|
||||
If the authentication is successful, the module must return the user's Matrix ID (e.g.
|
||||
`@alice:example.com`) and optionally a callback to be called with the response to the `/login` request.
|
||||
If the module doesn't wish to return a callback, it must return None instead.
|
||||
|
||||
If the authentication is unsuccessful, the module must return `None`.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `None`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `None` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback. If every callback return `None`,
|
||||
the authentication is denied.
|
||||
|
||||
### `on_logged_out`
|
||||
|
||||
_First introduced in Synapse v1.46.0_
|
||||
|
||||
```python
|
||||
async def on_logged_out(
|
||||
user_id: str,
|
||||
device_id: Optional[str],
|
||||
access_token: str
|
||||
) -> None
|
||||
```
|
||||
Called during a logout request for a user. It is passed the qualified user ID, the ID of the
|
||||
deactivated device (if any: access tokens are occasionally created without an associated
|
||||
device ID), and the (now deactivated) access token.
|
||||
|
||||
If multiple modules implement this callback, Synapse runs them all in order.
|
||||
|
||||
## Example
|
||||
|
||||
The example module below implements authentication checkers for two different login types:
|
||||
- `my.login.type`
|
||||
- Expects a `my_field` field to be sent to `/login`
|
||||
- Is checked by the method: `self.check_my_login`
|
||||
- `m.login.password` (defined in [the spec](https://matrix.org/docs/spec/client_server/latest#password-based))
|
||||
- Expects a `password` field to be sent to `/login`
|
||||
- Is checked by the method: `self.check_pass`
|
||||
|
||||
|
||||
```python
|
||||
from typing import Awaitable, Callable, Optional, Tuple
|
||||
|
||||
import synapse
|
||||
from synapse import module_api
|
||||
|
||||
|
||||
class MyAuthProvider:
|
||||
def __init__(self, config: dict, api: module_api):
|
||||
|
||||
self.api = api
|
||||
|
||||
self.credentials = {
|
||||
"bob": "building",
|
||||
"@scoop:matrix.org": "digging",
|
||||
}
|
||||
|
||||
api.register_password_auth_provider_callbacks(
|
||||
auth_checkers={
|
||||
("my.login_type", ("my_field",)): self.check_my_login,
|
||||
("m.login.password", ("password",)): self.check_pass,
|
||||
},
|
||||
)
|
||||
|
||||
async def check_my_login(
|
||||
self,
|
||||
username: str,
|
||||
login_type: str,
|
||||
login_dict: "synapse.module_api.JsonDict",
|
||||
) -> Optional[
|
||||
Tuple[
|
||||
str,
|
||||
Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]],
|
||||
]
|
||||
]:
|
||||
if login_type != "my.login_type":
|
||||
return None
|
||||
|
||||
if self.credentials.get(username) == login_dict.get("my_field"):
|
||||
return self.api.get_qualified_user_id(username)
|
||||
|
||||
async def check_pass(
|
||||
self,
|
||||
username: str,
|
||||
login_type: str,
|
||||
login_dict: "synapse.module_api.JsonDict",
|
||||
) -> Optional[
|
||||
Tuple[
|
||||
str,
|
||||
Optional[Callable[["synapse.module_api.LoginResponse"], Awaitable[None]]],
|
||||
]
|
||||
]:
|
||||
if login_type != "m.login.password":
|
||||
return None
|
||||
|
||||
if self.credentials.get(username) == login_dict.get("password"):
|
||||
return self.api.get_qualified_user_id(username)
|
||||
```
|
||||
@@ -12,6 +12,9 @@ should register this resource in its `__init__` method using the `register_web_r
|
||||
method from the `ModuleApi` class (see [this section](writing_a_module.html#registering-a-web-resource) for
|
||||
more info).
|
||||
|
||||
There is no longer a `get_db_schema_files` callback provided for password auth provider modules. Any
|
||||
changes to the database should now be made by the module using the module API class.
|
||||
|
||||
The module's author should also update any example in the module's configuration to only
|
||||
use the new `modules` section in Synapse's configuration file (see [this section](index.html#using-modules)
|
||||
for more info).
|
||||
|
||||
@@ -10,6 +10,8 @@ The available presence router callbacks are:
|
||||
|
||||
### `get_users_for_states`
|
||||
|
||||
_First introduced in Synapse v1.42.0_
|
||||
|
||||
```python
|
||||
async def get_users_for_states(
|
||||
state_updates: Iterable["synapse.api.UserPresenceState"],
|
||||
@@ -24,8 +26,14 @@ must return a dictionary that maps from Matrix user IDs (which can be local or r
|
||||
|
||||
Synapse will then attempt to send the specified presence updates to each user when possible.
|
||||
|
||||
If multiple modules implement this callback, Synapse merges all the dictionaries returned
|
||||
by the callbacks. If multiple callbacks return a dictionary containing the same key,
|
||||
Synapse concatenates the sets associated with this key from each dictionary.
|
||||
|
||||
### `get_interested_users`
|
||||
|
||||
_First introduced in Synapse v1.42.0_
|
||||
|
||||
```python
|
||||
async def get_interested_users(
|
||||
user_id: str
|
||||
@@ -44,6 +52,12 @@ query. The returned users can be local or remote.
|
||||
Alternatively the callback can return `synapse.module_api.PRESENCE_ALL_USERS`
|
||||
to indicate that the user should receive updates from all known users.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. Synapse
|
||||
calls each callback one by one, and use a concatenation of all the `set`s returned by the
|
||||
callbacks. If one callback returns `synapse.module_api.PRESENCE_ALL_USERS`, Synapse uses
|
||||
this value instead. If this happens, Synapse does not call any of the subsequent
|
||||
implementations of this callback.
|
||||
|
||||
## Example
|
||||
|
||||
The example below is a module that implements both presence router callbacks, and ensures
|
||||
|
||||
@@ -10,6 +10,8 @@ The available spam checker callbacks are:
|
||||
|
||||
### `check_event_for_spam`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
|
||||
```
|
||||
@@ -19,8 +21,37 @@ either a `bool` to indicate whether the event must be rejected because of spam,
|
||||
to indicate the event must be rejected because of spam and to give a rejection reason to
|
||||
forward to clients.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `False`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `False` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_join_room`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
|
||||
```
|
||||
|
||||
Called when a user is trying to join a room. The module must return a `bool` to indicate
|
||||
whether the user can join the room. The user is represented by their Matrix user ID (e.g.
|
||||
`@alice:example.com`) and the room is represented by its Matrix ID (e.g.
|
||||
`!room:example.com`). The module is also given a boolean to indicate whether the user
|
||||
currently has a pending invite in the room.
|
||||
|
||||
This callback isn't called if the join is performed by a server administrator, or in the
|
||||
context of a room creation.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_invite`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
|
||||
```
|
||||
@@ -29,8 +60,57 @@ Called when processing an invitation. The module must return a `bool` indicating
|
||||
the inviter can invite the invitee to the given room. Both inviter and invitee are
|
||||
represented by their Matrix user ID (e.g. `@alice:example.com`).
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_send_3pid_invite`
|
||||
|
||||
_First introduced in Synapse v1.45.0_
|
||||
|
||||
```python
|
||||
async def user_may_send_3pid_invite(
|
||||
inviter: str,
|
||||
medium: str,
|
||||
address: str,
|
||||
room_id: str,
|
||||
) -> bool
|
||||
```
|
||||
|
||||
Called when processing an invitation using a third-party identifier (also called a 3PID,
|
||||
e.g. an email address or a phone number). The module must return a `bool` indicating
|
||||
whether the inviter can invite the invitee to the given room.
|
||||
|
||||
The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
|
||||
invitee is represented by its medium (e.g. "email") and its address
|
||||
(e.g. `alice@example.com`). See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types)
|
||||
for more information regarding third-party identifiers.
|
||||
|
||||
For example, a call to this callback to send an invitation to the email address
|
||||
`alice@example.com` would look like this:
|
||||
|
||||
```python
|
||||
await user_may_send_3pid_invite(
|
||||
"@bob:example.com", # The inviter's user ID
|
||||
"email", # The medium of the 3PID to invite
|
||||
"alice@example.com", # The address of the 3PID to invite
|
||||
"!some_room:example.com", # The ID of the room to send the invite into
|
||||
)
|
||||
```
|
||||
|
||||
**Note**: If the third-party identifier is already associated with a matrix user ID,
|
||||
[`user_may_invite`](#user_may_invite) will be used instead.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_create_room`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def user_may_create_room(user: str) -> bool
|
||||
```
|
||||
@@ -38,8 +118,15 @@ async def user_may_create_room(user: str) -> bool
|
||||
Called when processing a room creation request. The module must return a `bool` indicating
|
||||
whether the given user (represented by their Matrix user ID) is allowed to create a room.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_create_room_with_invites`
|
||||
|
||||
_First introduced in Synapse v1.44.0_
|
||||
|
||||
```python
|
||||
async def user_may_create_room_with_invites(
|
||||
user: str,
|
||||
@@ -67,8 +154,15 @@ corresponding list(s) will be empty.
|
||||
since no invites are sent when cloning a room. To cover this case, modules also need to
|
||||
implement `user_may_create_room`.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_create_room_alias`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
|
||||
```
|
||||
@@ -77,8 +171,15 @@ Called when trying to associate an alias with an existing room. The module must
|
||||
`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
|
||||
to set the given alias.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `user_may_publish_room`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def user_may_publish_room(user: str, room_id: str) -> bool
|
||||
```
|
||||
@@ -87,8 +188,15 @@ Called when trying to publish a room to the homeserver's public rooms directory.
|
||||
module must return a `bool` indicating whether the given user (represented by their
|
||||
Matrix user ID) is allowed to publish the given room.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `check_username_for_spam`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
|
||||
```
|
||||
@@ -104,8 +212,15 @@ is represented as a dictionary with the following keys:
|
||||
The module is given a copy of the original dictionary, so modifying it from within the
|
||||
module cannot modify a user's profile when included in user directory search results.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `False`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `False` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `check_registration_for_spam`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def check_registration_for_spam(
|
||||
email_threepid: Optional[dict],
|
||||
@@ -129,8 +244,16 @@ The arguments passed to this callback are:
|
||||
used during the registration process.
|
||||
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `RegistrationBehaviour.ALLOW`, Synapse falls through to the next one.
|
||||
The value of the first callback that does not return `RegistrationBehaviour.ALLOW` will
|
||||
be used. If this happens, Synapse will not call any of the subsequent implementations of
|
||||
this callback.
|
||||
|
||||
### `check_media_file_for_spam`
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
```python
|
||||
async def check_media_file_for_spam(
|
||||
file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
|
||||
@@ -141,6 +264,11 @@ async def check_media_file_for_spam(
|
||||
Called when storing a local or remote file. The module must return a boolean indicating
|
||||
whether the given file can be stored in the homeserver's media store.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `False`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `False` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
## Example
|
||||
|
||||
The example below is a module that implements the spam checker callback
|
||||
|
||||
@@ -10,6 +10,8 @@ The available third party rules callbacks are:
|
||||
|
||||
### `check_event_allowed`
|
||||
|
||||
_First introduced in Synapse v1.39.0_
|
||||
|
||||
```python
|
||||
async def check_event_allowed(
|
||||
event: "synapse.events.EventBase",
|
||||
@@ -44,8 +46,15 @@ dictionary, and modify the returned dictionary accordingly.
|
||||
Note that replacing the event only works for events sent by local users, not for events
|
||||
received over federation.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `on_create_room`
|
||||
|
||||
_First introduced in Synapse v1.39.0_
|
||||
|
||||
```python
|
||||
async def on_create_room(
|
||||
requester: "synapse.types.Requester",
|
||||
@@ -63,8 +72,16 @@ the request is a server admin.
|
||||
Modules can modify the `request_content` (by e.g. adding events to its `initial_state`),
|
||||
or deny the room's creation by raising a `module_api.errors.SynapseError`.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns without raising an exception, Synapse falls through to the next one. The
|
||||
room creation will be forbidden as soon as one of the callbacks raises an exception. If
|
||||
this happens, Synapse will not call any of the subsequent implementations of this
|
||||
callback.
|
||||
|
||||
### `check_threepid_can_be_invited`
|
||||
|
||||
_First introduced in Synapse v1.39.0_
|
||||
|
||||
```python
|
||||
async def check_threepid_can_be_invited(
|
||||
medium: str,
|
||||
@@ -76,8 +93,15 @@ async def check_threepid_can_be_invited(
|
||||
Called when processing an invite via a third-party identifier (i.e. email or phone number).
|
||||
The module must return a boolean indicating whether the invite can go through.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `check_visibility_can_be_modified`
|
||||
|
||||
_First introduced in Synapse v1.39.0_
|
||||
|
||||
```python
|
||||
async def check_visibility_can_be_modified(
|
||||
room_id: str,
|
||||
@@ -90,6 +114,11 @@ Called when changing the visibility of a room in the local public room directory
|
||||
visibility is a string that's either "public" or "private". The module must return a
|
||||
boolean indicating whether the change can go through.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `True`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `True` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
## Example
|
||||
|
||||
The example below is a module that implements the third-party rules callback
|
||||
|
||||
@@ -12,6 +12,21 @@ configuration associated with the module in Synapse's configuration file.
|
||||
See the documentation for the `ModuleApi` class
|
||||
[here](https://github.com/matrix-org/synapse/blob/master/synapse/module_api/__init__.py).
|
||||
|
||||
## When Synapse runs with several modules configured
|
||||
|
||||
If Synapse is running with other modules configured, the order each module appears in
|
||||
within the `modules` section of the Synapse configuration file might restrict what it can
|
||||
or cannot register. See [this section](index.html#using-multiple-modules) for more
|
||||
information.
|
||||
|
||||
On top of the rules listed in the link above, if a callback returns a value that should
|
||||
cause the current operation to fail (e.g. if a callback checking an event returns with a
|
||||
value that should cause the event to be denied), Synapse will fail the operation and
|
||||
ignore any subsequent callbacks that should have been run after this one.
|
||||
|
||||
The documentation for each callback mentions how Synapse behaves when
|
||||
multiple modules implement it.
|
||||
|
||||
## Handling the module's configuration
|
||||
|
||||
A module can implement the following static method:
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
<h2 style="color:red">
|
||||
This page of the Synapse documentation is now deprecated. For up to date
|
||||
documentation on setting up or writing a password auth provider module, please see
|
||||
<a href="modules.md">this page</a>.
|
||||
</h2>
|
||||
|
||||
# Password auth provider modules
|
||||
|
||||
Password auth providers offer a way for server administrators to
|
||||
|
||||
@@ -472,6 +472,48 @@ limit_remote_rooms:
|
||||
#
|
||||
#user_ips_max_age: 14d
|
||||
|
||||
# Inhibits the /requestToken endpoints from returning an error that might leak
|
||||
# information about whether an e-mail address is in use or not on this
|
||||
# homeserver.
|
||||
# Note that for some endpoints the error situation is the e-mail already being
|
||||
# used, and for others the error is entering the e-mail being unused.
|
||||
# If this option is enabled, instead of returning an error, these endpoints will
|
||||
# act as if no error happened and return a fake session ID ('sid') to clients.
|
||||
#
|
||||
#request_token_inhibit_3pid_errors: true
|
||||
|
||||
# A list of domains that the domain portion of 'next_link' parameters
|
||||
# must match.
|
||||
#
|
||||
# This parameter is optionally provided by clients while requesting
|
||||
# validation of an email or phone number, and maps to a link that
|
||||
# users will be automatically redirected to after validation
|
||||
# succeeds. Clients can make use this parameter to aid the validation
|
||||
# process.
|
||||
#
|
||||
# The whitelist is applied whether the homeserver or an
|
||||
# identity server is handling validation.
|
||||
#
|
||||
# The default value is no whitelist functionality; all domains are
|
||||
# allowed. Setting this value to an empty list will instead disallow
|
||||
# all domains.
|
||||
#
|
||||
#next_link_domain_whitelist: ["matrix.org"]
|
||||
|
||||
# Templates to use when generating email or HTML page contents.
|
||||
#
|
||||
templates:
|
||||
# Directory in which Synapse will try to find template files to use to generate
|
||||
# email or HTML page contents.
|
||||
# If not set, or a file is not found within the template directory, a default
|
||||
# template from within the Synapse package will be used.
|
||||
#
|
||||
# See https://matrix-org.github.io/synapse/latest/templates.html for more
|
||||
# information about using custom templates.
|
||||
#
|
||||
#custom_template_directory: /path/to/custom/templates/
|
||||
|
||||
|
||||
# Message retention policy at the server level.
|
||||
#
|
||||
# Room admins and mods can define a retention period for their rooms using the
|
||||
@@ -541,47 +583,6 @@ retention:
|
||||
# - shortest_max_lifetime: 3d
|
||||
# interval: 1d
|
||||
|
||||
# Inhibits the /requestToken endpoints from returning an error that might leak
|
||||
# information about whether an e-mail address is in use or not on this
|
||||
# homeserver.
|
||||
# Note that for some endpoints the error situation is the e-mail already being
|
||||
# used, and for others the error is entering the e-mail being unused.
|
||||
# If this option is enabled, instead of returning an error, these endpoints will
|
||||
# act as if no error happened and return a fake session ID ('sid') to clients.
|
||||
#
|
||||
#request_token_inhibit_3pid_errors: true
|
||||
|
||||
# A list of domains that the domain portion of 'next_link' parameters
|
||||
# must match.
|
||||
#
|
||||
# This parameter is optionally provided by clients while requesting
|
||||
# validation of an email or phone number, and maps to a link that
|
||||
# users will be automatically redirected to after validation
|
||||
# succeeds. Clients can make use this parameter to aid the validation
|
||||
# process.
|
||||
#
|
||||
# The whitelist is applied whether the homeserver or an
|
||||
# identity server is handling validation.
|
||||
#
|
||||
# The default value is no whitelist functionality; all domains are
|
||||
# allowed. Setting this value to an empty list will instead disallow
|
||||
# all domains.
|
||||
#
|
||||
#next_link_domain_whitelist: ["matrix.org"]
|
||||
|
||||
# Templates to use when generating email or HTML page contents.
|
||||
#
|
||||
templates:
|
||||
# Directory in which Synapse will try to find template files to use to generate
|
||||
# email or HTML page contents.
|
||||
# If not set, or a file is not found within the template directory, a default
|
||||
# template from within the Synapse package will be used.
|
||||
#
|
||||
# See https://matrix-org.github.io/synapse/latest/templates.html for more
|
||||
# information about using custom templates.
|
||||
#
|
||||
#custom_template_directory: /path/to/custom/templates/
|
||||
|
||||
|
||||
## TLS ##
|
||||
|
||||
@@ -2260,34 +2261,6 @@ email:
|
||||
#email_validation: "[%(server_name)s] Validate your email"
|
||||
|
||||
|
||||
# Password providers allow homeserver administrators to integrate
|
||||
# their Synapse installation with existing authentication methods
|
||||
# ex. LDAP, external tokens, etc.
|
||||
#
|
||||
# For more information and known implementations, please see
|
||||
# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
|
||||
#
|
||||
# Note: instances wishing to use SAML or CAS authentication should
|
||||
# instead use the `saml2_config` or `cas_config` options,
|
||||
# respectively.
|
||||
#
|
||||
password_providers:
|
||||
# # Example config for an LDAP auth provider
|
||||
# - module: "ldap_auth_provider.LdapAuthProvider"
|
||||
# config:
|
||||
# enabled: true
|
||||
# uri: "ldap://ldap.example.com:389"
|
||||
# start_tls: true
|
||||
# base: "ou=users,dc=example,dc=com"
|
||||
# attributes:
|
||||
# uid: "cn"
|
||||
# mail: "email"
|
||||
# name: "givenName"
|
||||
# #bind_dn:
|
||||
# #bind_password:
|
||||
# #filter: "(objectClass=posixAccount)"
|
||||
|
||||
|
||||
|
||||
## Push ##
|
||||
|
||||
|
||||
@@ -85,6 +85,15 @@ process, for example:
|
||||
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
```
|
||||
|
||||
# Upgrading to v1.45.0
|
||||
|
||||
## Changes required to media storage provider modules when reading from the Synapse configuration object
|
||||
|
||||
Media storage provider modules that read from the Synapse configuration object (i.e. that
|
||||
read the value of `hs.config.[...]`) now need to specify the configuration section they're
|
||||
reading from. This means that if a module reads the value of e.g. `hs.config.media_store_path`,
|
||||
it needs to replace it with `hs.config.media.media_store_path`.
|
||||
|
||||
# Upgrading to v1.44.0
|
||||
|
||||
## The URL preview cache is no longer mirrored to storage providers
|
||||
@@ -178,8 +187,8 @@ of this endpoint modifying the media store.
|
||||
|
||||
The current third-party rules module interface is deprecated in favour of the new generic
|
||||
modules system introduced in Synapse v1.37.0. Authors of third-party rules modules can refer
|
||||
to [this documentation](modules.md#porting-an-existing-module-that-uses-the-old-interface)
|
||||
to update their modules. Synapse administrators can refer to [this documentation](modules.md#using-modules)
|
||||
to [this documentation](modules/porting_legacy_module.md)
|
||||
to update their modules. Synapse administrators can refer to [this documentation](modules/index.md)
|
||||
to update their configuration once the modules they are using have been updated.
|
||||
|
||||
We plan to remove support for the current third-party rules interface in September 2021.
|
||||
@@ -228,9 +237,9 @@ SQLite databases are unaffected by this change.
|
||||
|
||||
The current spam checker interface is deprecated in favour of a new generic modules system.
|
||||
Authors of spam checker modules can refer to [this
|
||||
documentation](modules.md#porting-an-existing-module-that-uses-the-old-interface)
|
||||
documentation](modules/porting_legacy_module.md
|
||||
to update their modules. Synapse administrators can refer to [this
|
||||
documentation](modules.md#using-modules)
|
||||
documentation](modules/index.md)
|
||||
to update their configuration once the modules they are using have been updated.
|
||||
|
||||
We plan to remove support for the current spam checker interface in August 2021.
|
||||
@@ -339,24 +348,24 @@ Please ensure your Application Services are up to date.
|
||||
## Requirement for X-Forwarded-Proto header
|
||||
|
||||
When using Synapse with a reverse proxy (in particular, when using the
|
||||
[x_forwarded]{.title-ref} option on an HTTP listener), Synapse now
|
||||
expects to receive an [X-Forwarded-Proto]{.title-ref} header on incoming
|
||||
`x_forwarded` option on an HTTP listener), Synapse now
|
||||
expects to receive an `X-Forwarded-Proto` header on incoming
|
||||
HTTP requests. If it is not set, Synapse will log a warning on each
|
||||
received request.
|
||||
|
||||
To avoid the warning, administrators using a reverse proxy should ensure
|
||||
that the reverse proxy sets [X-Forwarded-Proto]{.title-ref} header to
|
||||
[https]{.title-ref} or [http]{.title-ref} to indicate the protocol used
|
||||
that the reverse proxy sets `X-Forwarded-Proto` header to
|
||||
`https` or `http` to indicate the protocol used
|
||||
by the client.
|
||||
|
||||
Synapse also requires the [Host]{.title-ref} header to be preserved.
|
||||
Synapse also requires the `Host` header to be preserved.
|
||||
|
||||
See the [reverse proxy documentation](reverse_proxy.md), where the
|
||||
example configurations have been updated to show how to set these
|
||||
headers.
|
||||
|
||||
(Users of [Caddy](https://caddyserver.com/) are unaffected, since we
|
||||
believe it sets [X-Forwarded-Proto]{.title-ref} by default.)
|
||||
believe it sets `X-Forwarded-Proto` by default.)
|
||||
|
||||
# Upgrading to v1.27.0
|
||||
|
||||
@@ -520,13 +529,13 @@ mapping provider to specify different algorithms, instead of the
|
||||
way](<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>).
|
||||
|
||||
If your Synapse configuration uses a custom mapping provider
|
||||
([oidc_config.user_mapping_provider.module]{.title-ref} is specified and
|
||||
(`oidc_config.user_mapping_provider.module` is specified and
|
||||
not equal to
|
||||
[synapse.handlers.oidc_handler.JinjaOidcMappingProvider]{.title-ref})
|
||||
then you *must* ensure that [map_user_attributes]{.title-ref} of the
|
||||
`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`)
|
||||
then you *must* ensure that `map_user_attributes` of the
|
||||
mapping provider performs some normalisation of the
|
||||
[localpart]{.title-ref} returned. To match previous behaviour you can
|
||||
use the [map_username_to_mxid_localpart]{.title-ref} function provided
|
||||
`localpart` returned. To match previous behaviour you can
|
||||
use the `map_username_to_mxid_localpart` function provided
|
||||
by Synapse. An example is shown below:
|
||||
|
||||
```python
|
||||
@@ -555,7 +564,7 @@ v1.24.0. The Admin API is now only accessible under:
|
||||
|
||||
- `/_synapse/admin/v1`
|
||||
|
||||
The only exception is the [/admin/whois]{.title-ref} endpoint, which is
|
||||
The only exception is the `/admin/whois` endpoint, which is
|
||||
[also available via the client-server
|
||||
API](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
|
||||
|
||||
@@ -630,7 +639,7 @@ This page will appear to the user after clicking a password reset link
|
||||
that has been emailed to them.
|
||||
|
||||
To complete password reset, the page must include a way to make a
|
||||
[POST]{.title-ref} request to
|
||||
`POST` request to
|
||||
`/_synapse/client/password_reset/{medium}/submit_token` with the query
|
||||
parameters from the original link, presented as a URL-encoded form. See
|
||||
the file itself for more details.
|
||||
@@ -651,18 +660,18 @@ but the parameters are slightly different:
|
||||
|
||||
# Upgrading to v1.18.0
|
||||
|
||||
## Docker [-py3]{.title-ref} suffix will be removed in future versions
|
||||
## Docker `-py3` suffix will be removed in future versions
|
||||
|
||||
From 10th August 2020, we will no longer publish Docker images with the
|
||||
[-py3]{.title-ref} tag suffix. The images tagged with the
|
||||
[-py3]{.title-ref} suffix have been identical to the non-suffixed tags
|
||||
`-py3` tag suffix. The images tagged with the
|
||||
`-py3` suffix have been identical to the non-suffixed tags
|
||||
since release 0.99.0, and the suffix is obsolete.
|
||||
|
||||
On 10th August, we will remove the [latest-py3]{.title-ref} tag.
|
||||
Existing per-release tags (such as [v1.18.0-py3]{.title-ref}) will not
|
||||
be removed, but no new [-py3]{.title-ref} tags will be added.
|
||||
On 10th August, we will remove the `latest-py3` tag.
|
||||
Existing per-release tags (such as `v1.18.0-py3` will not
|
||||
be removed, but no new `-py3` tags will be added.
|
||||
|
||||
Scripts relying on the [-py3]{.title-ref} suffix will need to be
|
||||
Scripts relying on the `-py3` suffix will need to be
|
||||
updated.
|
||||
|
||||
## Redis replication is now recommended in lieu of TCP replication
|
||||
@@ -696,8 +705,8 @@ This will *not* be a problem for Synapse installations which were:
|
||||
If completeness of the room directory is a concern, installations which
|
||||
are affected can be repaired as follows:
|
||||
|
||||
1. Run the following sql from a [psql]{.title-ref} or
|
||||
[sqlite3]{.title-ref} console:
|
||||
1. Run the following sql from a `psql` or
|
||||
`sqlite3` console:
|
||||
|
||||
```sql
|
||||
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
|
||||
@@ -761,8 +770,8 @@ participating in many rooms.
|
||||
of any problems.
|
||||
|
||||
1. As an initial check to see if you will be affected, you can try
|
||||
running the following query from the [psql]{.title-ref} or
|
||||
[sqlite3]{.title-ref} console. It is safe to run it while Synapse is
|
||||
running the following query from the `psql` or
|
||||
`sqlite3` console. It is safe to run it while Synapse is
|
||||
still running.
|
||||
|
||||
```sql
|
||||
@@ -1344,9 +1353,9 @@ first need to upgrade the database by running:
|
||||
|
||||
python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
|
||||
|
||||
Where [<db>]{.title-ref} is the location of the database,
|
||||
[<server_name>]{.title-ref} is the server name as specified in the
|
||||
synapse configuration, and [<signing_key>]{.title-ref} is the location
|
||||
Where `<db>` is the location of the database,
|
||||
`<server_name>` is the server name as specified in the
|
||||
synapse configuration, and `<signing_key>` is the location
|
||||
of the signing key as specified in the synapse configuration.
|
||||
|
||||
This may take some time to complete. Failures of signatures and content
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
# Registration Tokens
|
||||
|
||||
This API allows you to manage tokens which can be used to authenticate
|
||||
registration requests, as proposed in [MSC3231](https://github.com/govynnus/matrix-doc/blob/token-registration/proposals/3231-token-authenticated-registration.md).
|
||||
registration requests, as proposed in
|
||||
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md).
|
||||
To use it, you will need to enable the `registration_requires_token` config
|
||||
option, and authenticate by providing an `access_token` for a server admin:
|
||||
see [Admin API](../../usage/administration/admin_api).
|
||||
@@ -148,7 +149,7 @@ POST /_synapse/admin/v1/registration_tokens/new
|
||||
|
||||
The request body must be a JSON object and can contain the following fields:
|
||||
- `token`: The registration token. A string of no more than 64 characters that
|
||||
consists only of characters matched by the regex `[A-Za-z0-9-_]`.
|
||||
consists only of characters matched by the regex `[A-Za-z0-9._~-]`.
|
||||
Default: randomly generated.
|
||||
- `uses_allowed`: The integer number of times the token can be used to complete
|
||||
a registration before it becomes invalid.
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
|
||||
Below is a sample logging configuration file. This file can be tweaked to control how your
|
||||
homeserver will output logs. A restart of the server is generally required to apply any
|
||||
changes made to this file.
|
||||
changes made to this file. The value of the `log_config` option in your homeserver
|
||||
config should be the path to this file.
|
||||
|
||||
Note that the contents below are *not* intended to be copied and used as the basis for
|
||||
a real homeserver.yaml. Instead, if you are starting from scratch, please generate
|
||||
a fresh config using Synapse by following the instructions in
|
||||
[Installation](../../setup/installation.md).
|
||||
Note that a default logging configuration (shown below) is created automatically alongside
|
||||
the homeserver config when following the [installation instructions](../../setup/installation.md).
|
||||
It should be named `<SERVERNAME>.log.config` by default.
|
||||
|
||||
```yaml
|
||||
{{#include ../../sample_log_config.yaml}}
|
||||
```
|
||||
```
|
||||
|
||||
@@ -1,4 +1,79 @@
|
||||
# Introduction
|
||||
|
||||
Welcome to the documentation repository for Synapse, the reference
|
||||
[Matrix](https://matrix.org) homeserver implementation.
|
||||
Welcome to the documentation repository for Synapse, a
|
||||
[Matrix](https://matrix.org) homeserver implementation developed by the matrix.org core
|
||||
team.
|
||||
|
||||
## Installing and using Synapse
|
||||
|
||||
This documentation covers topics for **installation**, **configuration** and
|
||||
**maintainence** of your Synapse process:
|
||||
|
||||
* Learn how to [install](setup/installation.md) and
|
||||
[configure](usage/configuration/index.html) your own instance, perhaps with [Single
|
||||
Sign-On](usage/configuration/user_authentication/index.html).
|
||||
|
||||
* See how to [upgrade](upgrade.md) between Synapse versions.
|
||||
|
||||
* Administer your instance using the [Admin
|
||||
API](usage/administration/admin_api/index.html), installing [pluggable
|
||||
modules](modules/index.html), or by accessing the [manhole](manhole.md).
|
||||
|
||||
* Learn how to [read log lines](usage/administration/request_log.md), configure
|
||||
[logging](usage/configuration/logging_sample_config.md) or set up [structured
|
||||
logging](structured_logging.md).
|
||||
|
||||
* Scale Synapse through additional [worker processes](workers.md).
|
||||
|
||||
* Set up [monitoring and metrics](metrics-howto.md) to keep an eye on your
|
||||
Synapse instance's performance.
|
||||
|
||||
## Developing on Synapse
|
||||
|
||||
Contributions are welcome! Synapse is primarily written in
|
||||
[Python](https://python.org). As a developer, you may be interested in the
|
||||
following documentation:
|
||||
|
||||
* Read the [Contributing Guide](development/contributing_guide.md). It is meant
|
||||
to walk new contributors through the process of developing and submitting a
|
||||
change to the Synapse codebase (which is [hosted on
|
||||
GitHub](https://github.com/matrix-org/synapse)).
|
||||
|
||||
* Set up your [development
|
||||
environment](development/contributing_guide.md#2-what-do-i-need), then learn
|
||||
how to [lint](development/contributing_guide.md#run-the-linters) and
|
||||
[test](development/contributing_guide.md#8-test-test-test) your code.
|
||||
|
||||
* Look at [the issue tracker](https://github.com/matrix-org/synapse/issues) for
|
||||
bugs to fix or features to add. If you're new, it may be best to start with
|
||||
those labeled [good first
|
||||
issue](https://github.com/matrix-org/synapse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).
|
||||
|
||||
* Understand [how Synapse is
|
||||
built](development/internal_documentation/index.html), how to [migrate
|
||||
database schemas](development/database_schema.md), learn about
|
||||
[federation](federate.md) and how to [set up a local
|
||||
federation](federate.md#running-a-demo-federation-of-synapses) for development.
|
||||
|
||||
* We like to keep our `git` history clean. [Learn](development/git.md) how to
|
||||
do so!
|
||||
|
||||
* And finally, contribute to this documentation! The source for which is
|
||||
[located here](https://github.com/matrix-org/synapse/tree/develop/docs).
|
||||
|
||||
## Donating to Synapse development
|
||||
|
||||
Want to help keep Synapse going but don't know how to code? Synapse is a
|
||||
[Matrix.org Foundation](https://matrix.org) project. Consider becoming a
|
||||
supportor on [Liberapay](https://liberapay.com/matrixdotorg),
|
||||
[Patreon](https://patreon.com/matrixdotorg) or through
|
||||
[PayPal](https://paypal.me/matrixdotorg) via a one-time donation.
|
||||
|
||||
If you are an organisation or enterprise and would like to sponsor development,
|
||||
reach out to us over email at: support (at) matrix.org
|
||||
|
||||
## Reporting a security vulnerability
|
||||
|
||||
If you've found a security issue in Synapse or any other Matrix.org Foundation
|
||||
project, please report it to us in accordance with our [Security Disclosure
|
||||
Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!
|
||||
|
||||
197
mypy.ini
197
mypy.ini
@@ -22,8 +22,11 @@ files =
|
||||
synapse/crypto,
|
||||
synapse/event_auth.py,
|
||||
synapse/events/builder.py,
|
||||
synapse/events/presence_router.py,
|
||||
synapse/events/snapshot.py,
|
||||
synapse/events/spamcheck.py,
|
||||
synapse/events/third_party_rules.py,
|
||||
synapse/events/utils.py,
|
||||
synapse/events/validator.py,
|
||||
synapse/federation,
|
||||
synapse/groups,
|
||||
@@ -53,6 +56,7 @@ files =
|
||||
synapse/storage/_base.py,
|
||||
synapse/storage/background_updates.py,
|
||||
synapse/storage/databases/main/appservice.py,
|
||||
synapse/storage/databases/main/client_ips.py,
|
||||
synapse/storage/databases/main/events.py,
|
||||
synapse/storage/databases/main/keys.py,
|
||||
synapse/storage/databases/main/pusher.py,
|
||||
@@ -88,23 +92,71 @@ files =
|
||||
tests/handlers/test_user_directory.py,
|
||||
tests/rest/client/test_login.py,
|
||||
tests/rest/client/test_auth.py,
|
||||
tests/rest/client/test_relations.py,
|
||||
tests/rest/media/v1/test_filepath.py,
|
||||
tests/rest/media/v1/test_oembed.py,
|
||||
tests/storage/test_state.py,
|
||||
tests/storage/test_user_directory.py,
|
||||
tests/util/test_itertools.py,
|
||||
tests/util/test_stream_change_cache.py
|
||||
|
||||
[mypy-synapse.api.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.crypto.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.events.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.handlers.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.push.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.rest.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.server_notices.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.state.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.client_ips]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.util.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.streams.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.batching_queue]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.cached_call]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.dictionary_cache]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.lrucache]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.response_cache]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.stream_change_cache]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.ttl_cache]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.daemonize]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.file_consumer]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
@@ -141,6 +193,9 @@ disallow_untyped_defs = True
|
||||
[mypy-synapse.util.msisdn]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.patch_inline_callbacks]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.ratelimitutils]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
@@ -162,98 +217,106 @@ disallow_untyped_defs = True
|
||||
[mypy-synapse.util.wheel_timer]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-pymacaroons.*]
|
||||
ignore_missing_imports = True
|
||||
[mypy-synapse.util.versionstring]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-zope]
|
||||
[mypy-tests.handlers.test_user_directory]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-tests.storage.test_user_directory]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
;; Dependencies without annotations
|
||||
;; Before ignoring a module, check to see if type stubs are available.
|
||||
;; The `typeshed` project maintains stubs here:
|
||||
;; https://github.com/python/typeshed/tree/master/stubs
|
||||
;; and for each package `foo` there's a corresponding `types-foo` package on PyPI,
|
||||
;; which we can pull in as a dev dependency by adding to `setup.py`'s
|
||||
;; `CONDITIONAL_REQUIREMENTS["mypy"]` list.
|
||||
|
||||
[mypy-authlib.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-bcrypt]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-constantly]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-twisted.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-treq.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-hyperlink]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-h11]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-msgpack]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-opentracing]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-OpenSSL.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-netaddr]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-saml2.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-canonicaljson]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-jaeger_client.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-jsonschema]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-signedjson.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-prometheus_client.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-service_identity.*]
|
||||
[mypy-constantly]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-daemonize]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-sentry_sdk]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-PIL.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-lxml]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-jwt.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-authlib.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-rust_python_jaeger_reporter.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-nacl.*]
|
||||
[mypy-h11]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-hiredis]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-hyperlink]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-ijson.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-jaeger_client.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-josepy.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-pympler.*]
|
||||
[mypy-jwt.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-lxml]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-msgpack]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-nacl.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-netaddr]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-opentracing]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-phonenumbers.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-ijson.*]
|
||||
[mypy-prometheus_client.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-pymacaroons.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-pympler.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-rust_python_jaeger_reporter.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-saml2.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-sentry_sdk]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-service_identity.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-signedjson.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-treq.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-twisted.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-zope]
|
||||
ignore_missing_imports = True
|
||||
|
||||
@@ -27,6 +27,7 @@ DISTS = (
|
||||
"ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:hirsute", # 21.04 (EOL 2022-01-05)
|
||||
"ubuntu:impish", # 21.10 (EOL 2022-07)
|
||||
)
|
||||
|
||||
DESC = """\
|
||||
|
||||
@@ -90,10 +90,10 @@ else
|
||||
"scripts/hash_password"
|
||||
"scripts/register_new_matrix_user"
|
||||
"scripts/synapse_port_db"
|
||||
"scripts/update_synapse_database"
|
||||
"scripts-dev"
|
||||
"scripts-dev/build_debian_packages"
|
||||
"scripts-dev/sign_json"
|
||||
"scripts-dev/update_database"
|
||||
"contrib" "synctl" "setup.py" "synmark" "stubs" ".ci"
|
||||
)
|
||||
fi
|
||||
|
||||
@@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"
|
||||
|
||||
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
|
||||
echo "Running db background jobs..."
|
||||
scripts-dev/update_database --database-config "$SQLITE_CONFIG"
|
||||
scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG"
|
||||
|
||||
# Create the PostgreSQL database.
|
||||
echo "Creating postgres database..."
|
||||
|
||||
@@ -35,6 +35,19 @@ from github import Github
|
||||
from packaging import version
|
||||
|
||||
|
||||
def run_until_successful(command, *args, **kwargs):
|
||||
while True:
|
||||
completed_process = subprocess.run(command, *args, **kwargs)
|
||||
exit_code = completed_process.returncode
|
||||
if exit_code == 0:
|
||||
# successful, so nothing more to do here.
|
||||
return completed_process
|
||||
|
||||
print(f"The command {command!r} failed with exit code {exit_code}.")
|
||||
print("Please try to correct the failure and then re-run.")
|
||||
click.confirm("Try again?", abort=True)
|
||||
|
||||
|
||||
@click.group()
|
||||
def cli():
|
||||
"""An interactive script to walk through the parts of creating a release.
|
||||
@@ -197,7 +210,7 @@ def prepare():
|
||||
f.write(parsed_synapse_ast.dumps())
|
||||
|
||||
# Generate changelogs
|
||||
subprocess.run("python3 -m towncrier", shell=True)
|
||||
run_until_successful("python3 -m towncrier", shell=True)
|
||||
|
||||
# Generate debian changelogs
|
||||
if parsed_new_version.pre is not None:
|
||||
@@ -209,11 +222,11 @@ def prepare():
|
||||
else:
|
||||
debian_version = new_version
|
||||
|
||||
subprocess.run(
|
||||
run_until_successful(
|
||||
f'dch -M -v {debian_version} "New synapse release {debian_version}."',
|
||||
shell=True,
|
||||
)
|
||||
subprocess.run('dch -M -r -D stable ""', shell=True)
|
||||
run_until_successful('dch -M -r -D stable ""', shell=True)
|
||||
|
||||
# Show the user the changes and ask if they want to edit the change log.
|
||||
repo.git.add("-u")
|
||||
@@ -224,7 +237,7 @@ def prepare():
|
||||
|
||||
# Commit the changes.
|
||||
repo.git.add("-u")
|
||||
repo.git.commit(f"-m {new_version}")
|
||||
repo.git.commit("-m", new_version)
|
||||
|
||||
# We give the option to bail here in case the user wants to make sure things
|
||||
# are OK before pushing.
|
||||
@@ -239,6 +252,8 @@ def prepare():
|
||||
# Otherwise, push and open the changelog in the browser.
|
||||
repo.git.push("-u", repo.remote().name, repo.active_branch.name)
|
||||
|
||||
print("Opening the changelog in your browser...")
|
||||
print("Please ask others to give it a check.")
|
||||
click.launch(
|
||||
f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md"
|
||||
)
|
||||
@@ -290,7 +305,19 @@ def tag(gh_token: Optional[str]):
|
||||
|
||||
# If no token was given, we bail here
|
||||
if not gh_token:
|
||||
print("Launching the GitHub release page in your browser.")
|
||||
print("Please correct the title and create a draft.")
|
||||
if current_version.is_prerelease:
|
||||
print("As this is an RC, remember to mark it as a pre-release!")
|
||||
print("(by the way, this step can be automated by passing --gh-token,")
|
||||
print("or one of the GH_TOKEN or GITHUB_TOKEN env vars.)")
|
||||
click.launch(f"https://github.com/matrix-org/synapse/releases/edit/{tag_name}")
|
||||
|
||||
print("Once done, you need to wait for the release assets to build.")
|
||||
if click.confirm("Launch the release assets actions page?", default=True):
|
||||
click.launch(
|
||||
f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}"
|
||||
)
|
||||
return
|
||||
|
||||
# Create a new draft release
|
||||
@@ -305,6 +332,7 @@ def tag(gh_token: Optional[str]):
|
||||
)
|
||||
|
||||
# Open the release and the actions where we are building the assets.
|
||||
print("Launching the release page and the actions page.")
|
||||
click.launch(release.html_url)
|
||||
click.launch(
|
||||
f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}"
|
||||
|
||||
@@ -51,13 +51,19 @@ Example usage:
|
||||
"request with.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"-K",
|
||||
"--signing-key",
|
||||
help="The private ed25519 key to sign the request with.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"-c",
|
||||
"--config",
|
||||
default="homeserver.yaml",
|
||||
help=(
|
||||
"Path to synapse config file, from which the server name and/or signing "
|
||||
"key path will be read. Ignored if --server-name and --signing-key-path "
|
||||
"key path will be read. Ignored if --server-name and --signing-key(-path) "
|
||||
"are both given."
|
||||
),
|
||||
)
|
||||
@@ -87,11 +93,14 @@ Example usage:
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.server_name or not args.signing_key_path:
|
||||
if not args.server_name or not (args.signing_key_path or args.signing_key):
|
||||
read_args_from_config(args)
|
||||
|
||||
with open(args.signing_key_path) as f:
|
||||
key = read_signing_keys(f)[0]
|
||||
if args.signing_key:
|
||||
keys = read_signing_keys([args.signing_key])
|
||||
else:
|
||||
with open(args.signing_key_path) as f:
|
||||
keys = read_signing_keys(f)
|
||||
|
||||
json_to_sign = args.input_data
|
||||
if json_to_sign is None:
|
||||
@@ -107,7 +116,7 @@ Example usage:
|
||||
print("Input json was not an object", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
sign_json(obj, args.server_name, key)
|
||||
sign_json(obj, args.server_name, keys[0])
|
||||
for c in json_encoder.iterencode(obj):
|
||||
args.output.write(c)
|
||||
args.output.write("\n")
|
||||
@@ -118,8 +127,17 @@ def read_args_from_config(args: argparse.Namespace) -> None:
|
||||
config = yaml.safe_load(fh)
|
||||
if not args.server_name:
|
||||
args.server_name = config["server_name"]
|
||||
if not args.signing_key_path:
|
||||
args.signing_key_path = config["signing_key_path"]
|
||||
if not args.signing_key_path and not args.signing_key:
|
||||
if "signing_key" in config:
|
||||
args.signing_key = config["signing_key"]
|
||||
elif "signing_key_path" in config:
|
||||
args.signing_key_path = config["signing_key_path"]
|
||||
else:
|
||||
print(
|
||||
"A signing key must be given on the commandline or in the config file.",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -215,7 +215,7 @@ class MockHomeserver:
|
||||
def __init__(self, config):
|
||||
self.clock = Clock(reactor)
|
||||
self.config = config
|
||||
self.hostname = config.server_name
|
||||
self.hostname = config.server.server_name
|
||||
self.version_string = "Synapse/" + get_version_string(synapse)
|
||||
|
||||
def get_clock(self):
|
||||
@@ -583,7 +583,7 @@ class Porter(object):
|
||||
return
|
||||
|
||||
self.postgres_store = self.build_db_store(
|
||||
self.hs_config.get_single_database()
|
||||
self.hs_config.database.get_single_database()
|
||||
)
|
||||
|
||||
await self.run_background_updates_on_postgres()
|
||||
@@ -1069,7 +1069,7 @@ class CursesProgress(Progress):
|
||||
|
||||
self.stdscr.addstr(0, 0, status, curses.A_BOLD)
|
||||
|
||||
max_len = max([len(t) for t in self.tables.keys()])
|
||||
max_len = max(len(t) for t in self.tables.keys())
|
||||
|
||||
left_margin = 5
|
||||
middle_space = 1
|
||||
|
||||
@@ -36,16 +36,35 @@ class MockHomeserver(HomeServer):

    def __init__(self, config, **kwargs):
        super(MockHomeserver, self).__init__(
            config.server_name, reactor=reactor, config=config, **kwargs
            config.server.server_name, reactor=reactor, config=config, **kwargs
        )

        self.version_string = "Synapse/" + get_version_string(synapse)


if __name__ == "__main__":
def run_background_updates(hs):
    store = hs.get_datastore()

    async def run_background_updates():
        await store.db_pool.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()

    def run():
        # Apply all background updates on the database.
        defer.ensureDeferred(
            run_as_background_process("background_updates", run_background_updates)
        )

    reactor.callWhenRunning(run)

    reactor.run()


def main():
    parser = argparse.ArgumentParser(
        description=(
            "Updates a synapse database to the latest schema and runs background updates"
            "Updates a synapse database to the latest schema and optionally runs background updates"
            " on it."
        )
    )
@@ -54,7 +73,13 @@ if __name__ == "__main__":
        "--database-config",
        type=argparse.FileType("r"),
        required=True,
        help="A database config file for either a SQLite3 database or a PostgreSQL one.",
        help="Synapse configuration file, giving the details of the database to be updated",
    )
    parser.add_argument(
        "--run-background-updates",
        action="store_true",
        required=False,
        help="run background updates after upgrading the database schema",
    )

    args = parser.parse_args()
@@ -82,19 +107,10 @@ if __name__ == "__main__":
    # Setup instantiates the store within the homeserver object and updates the
    # DB.
    hs.setup()
    store = hs.get_datastore()

    async def run_background_updates():
        await store.db_pool.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()
    if args.run_background_updates:
        run_background_updates(hs)

    def run():
        # Apply all background updates on the database.
        defer.ensureDeferred(
            run_as_background_process("background_updates", run_background_updates)
        )

    reactor.callWhenRunning(run)

    reactor.run()
if __name__ == "__main__":
    main()
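A self-contained sketch of the Twisted pattern the refactored script keeps: schedule an async task once the reactor is running, and stop the reactor when the task finishes so the process exits:

```python
from twisted.internet import defer, reactor

async def do_updates():
    # ... run the database background updates here ...
    reactor.stop()  # exit once the work is done

def run():
    # ensureDeferred turns the coroutine into a Deferred the reactor drives.
    defer.ensureDeferred(do_updates())

reactor.callWhenRunning(run)
reactor.run()
```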
32 setup.py
@@ -103,17 +103,17 @@ CONDITIONAL_REQUIREMENTS["lint"] = [
    "flake8",
]

CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [
    # The following are used by the release script
    "click==7.1.2",
    "redbaron==0.9.2",
    "GitPython==3.1.14",
    "commonmark==0.9.1",
    "pygithub==1.55",
CONDITIONAL_REQUIREMENTS["mypy"] = [
    "mypy==0.910",
    "mypy-zope==0.3.2",
    "types-bleach>=4.1.0",
    "types-jsonschema>=3.2.0",
    "types-Pillow>=8.3.4",
    "types-pyOpenSSL>=20.0.7",
    "types-PyYAML>=5.4.10",
    "types-setuptools>=57.4.0",
]

CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
@@ -121,6 +121,20 @@ CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"]

CONDITIONAL_REQUIREMENTS["dev"] = (
    CONDITIONAL_REQUIREMENTS["lint"]
    + CONDITIONAL_REQUIREMENTS["mypy"]
    + CONDITIONAL_REQUIREMENTS["test"]
    + [
        # The following are used by the release script
        "click==7.1.2",
        "redbaron==0.9.2",
        "GitPython==3.1.14",
        "commonmark==0.9.1",
        "pygithub==1.55",
    ]
)

setup(
    name="matrix-synapse",
    version=version,
@@ -47,7 +47,7 @@ try:
except ImportError:
    pass

__version__ = "1.44.0"
__version__ = "1.46.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -245,7 +245,7 @@ class Auth:

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ):
    ) -> None:
        """Validates that the app service is allowed to control
        the given user.

@@ -618,5 +618,13 @@ class Auth:
            % (user_id, room_id),
        )

    async def check_auth_blocking(self, *args, **kwargs) -> None:
        await self._auth_blocking.check_auth_blocking(*args, **kwargs)
    async def check_auth_blocking(
        self,
        user_id: Optional[str] = None,
        threepid: Optional[dict] = None,
        user_type: Optional[str] = None,
        requester: Optional[Requester] = None,
    ) -> None:
        await self._auth_blocking.check_auth_blocking(
            user_id=user_id, threepid=threepid, user_type=user_type, requester=requester
        )
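A standalone sketch of why spelling the keywords out helps: mypy can now check argument names and types at the call site, which the old *args/**kwargs passthrough could not. The function below is an illustrative stand-in, not the real Auth method:

```python
from typing import Optional

async def check_auth_blocking(
    user_id: Optional[str] = None,
    threepid: Optional[dict] = None,
    user_type: Optional[str] = None,
) -> None:
    """Illustrative stand-in with an explicit signature."""

# With the explicit signature, a misspelled keyword is a static error:
#   check_auth_blocking(userid="@alice:example.com")
#   -> mypy: unexpected keyword argument "userid"
```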
@@ -176,6 +176,7 @@ class RelationTypes:
    ANNOTATION = "m.annotation"
    REPLACE = "m.replace"
    REFERENCE = "m.reference"
    THREAD = "io.element.thread"


class LimitBlockingTypes:

@@ -18,7 +18,7 @@
import logging
import typing
from http import HTTPStatus
from typing import Dict, List, Optional, Union
from typing import Any, Dict, List, Optional, Union

from twisted.web import http

@@ -143,7 +143,7 @@ class SynapseError(CodeMessageException):
        super().__init__(code, msg)
        self.errcode = errcode

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode)


@@ -175,7 +175,7 @@ class ProxiedRequestError(SynapseError):
        else:
            self._additional_fields = dict(additional_fields)

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, **self._additional_fields)


@@ -196,7 +196,7 @@ class ConsentNotGivenError(SynapseError):
        )
        self._consent_uri = consent_uri

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)


@@ -262,14 +262,10 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError):
    """An error indicating we don't understand the request you're trying to make"""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.UNRECOGNIZED
        if len(args) == 0:
            message = "Unrecognized request"
        else:
            message = args[0]
        super().__init__(400, message, **kwargs)
    def __init__(
        self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
    ):
        super().__init__(400, msg, errcode)


class NotFoundError(SynapseError):
@@ -284,10 +280,8 @@ class AuthError(SynapseError):
    other poorly-defined times.
    """

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.FORBIDDEN
        super().__init__(*args, **kwargs)
    def __init__(self, code: int, msg: str, errcode: str = Codes.FORBIDDEN):
        super().__init__(code, msg, errcode)


class InvalidClientCredentialsError(SynapseError):
@@ -321,7 +315,7 @@ class InvalidClientTokenError(InvalidClientCredentialsError):
        super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
        self._soft_logout = soft_logout

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        d = super().error_dict()
        d["soft_logout"] = self._soft_logout
        return d
@@ -345,7 +339,7 @@ class ResourceLimitError(SynapseError):
        self.limit_type = limit_type
        super().__init__(code, msg, errcode=errcode)

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(
            self.msg,
            self.errcode,
@@ -357,32 +351,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.TOO_LARGE
        super().__init__(413, *args, **kwargs)


class EventStreamError(SynapseError):
    """An error raised when there a problem with the event stream."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.BAD_PAGINATION
        super().__init__(*args, **kwargs)
    def __init__(self, msg: str):
        super().__init__(413, msg, Codes.TOO_LARGE)


class LoginError(SynapseError):
    """An error raised when there was a problem logging in."""

    pass


class StoreError(SynapseError):
    """An error raised when there was a problem storing some data."""

    pass


class InvalidCaptchaError(SynapseError):
    def __init__(
@@ -395,7 +374,7 @@ class InvalidCaptchaError(SynapseError):
        super().__init__(code, msg, errcode)
        self.error_url = error_url

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, error_url=self.error_url)


@@ -412,7 +391,7 @@ class LimitExceededError(SynapseError):
        super().__init__(code, msg, errcode)
        self.retry_after_ms = retry_after_ms

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)


@@ -443,10 +422,8 @@ class UnsupportedRoomVersionError(SynapseError):
class ThreepidValidationError(SynapseError):
    """An error raised when there was a problem authorising an event."""

    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.FORBIDDEN
        super().__init__(*args, **kwargs)
    def __init__(self, msg: str, errcode: str = Codes.FORBIDDEN):
        super().__init__(400, msg, errcode)


class IncompatibleRoomVersionError(SynapseError):
@@ -466,7 +443,7 @@ class IncompatibleRoomVersionError(SynapseError):

        self._room_version = room_version

    def error_dict(self):
    def error_dict(self) -> "JsonDict":
        return cs_error(self.msg, self.errcode, room_version=self._room_version)


@@ -494,7 +471,7 @@ class RequestSendFailed(RuntimeError):
    errors (like programming errors).
    """

    def __init__(self, inner_exception, can_retry):
    def __init__(self, inner_exception: BaseException, can_retry: bool):
        super().__init__(
            "Failed to send request: %s: %s"
            % (type(inner_exception).__name__, inner_exception)
@@ -503,7 +480,7 @@ class RequestSendFailed(RuntimeError):
        self.can_retry = can_retry


def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs):
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
    """Utility method for constructing an error response for client-server
    interactions.

@@ -551,7 +528,7 @@ class FederationError(RuntimeError):
        msg = "%s %s: %s" % (level, code, reason)
        super().__init__(msg)

    def get_dict(self):
    def get_dict(self) -> "JsonDict":
        return {
            "level": self.level,
            "code": self.code,
@@ -580,7 +557,7 @@ class HttpResponseException(CodeMessageException):
        super().__init__(code, msg)
        self.response = response

    def to_synapse_error(self):
    def to_synapse_error(self) -> SynapseError:
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
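The hunks above repeat one pattern: each exception keeps its extra fields and reports them through error_dict(), whose return type is now declared as "JsonDict" (Synapse's alias for Dict[str, Any]). A minimal stand-in showing the shape:

```python
from typing import Any, Dict

JsonDict = Dict[str, Any]  # matches the alias the annotations refer to

class LimitExceededSketch(Exception):
    """Illustrative stand-in, not the real SynapseError subclass."""

    def __init__(self, msg: str, retry_after_ms: int):
        super().__init__(msg)
        self.msg = msg
        self.retry_after_ms = retry_after_ms

    def error_dict(self) -> JsonDict:
        # Extra fields travel in the serialised error body.
        return {"error": self.msg, "retry_after_ms": self.retry_after_ms}
```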
@@ -15,7 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import List
from typing import (
    TYPE_CHECKING,
    Awaitable,
    Container,
    Iterable,
    List,
    Optional,
    Set,
    TypeVar,
    Union,
)

import jsonschema
from jsonschema import FormatChecker
@@ -23,7 +33,11 @@ from jsonschema import FormatChecker
from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.types import RoomID, UserID
from synapse.events import EventBase
from synapse.types import JsonDict, RoomID, UserID

if TYPE_CHECKING:
    from synapse.server import HomeServer

FILTER_SCHEMA = {
    "additionalProperties": False,
@@ -120,25 +134,29 @@ USER_FILTER_SCHEMA = {


@FormatChecker.cls_checks("matrix_room_id")
def matrix_room_id_validator(room_id_str):
def matrix_room_id_validator(room_id_str: str) -> RoomID:
    return RoomID.from_string(room_id_str)


@FormatChecker.cls_checks("matrix_user_id")
def matrix_user_id_validator(user_id_str):
def matrix_user_id_validator(user_id_str: str) -> UserID:
    return UserID.from_string(user_id_str)


class Filtering:
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.store = hs.get_datastore()

    async def get_user_filter(self, user_localpart, filter_id):
    async def get_user_filter(
        self, user_localpart: str, filter_id: Union[int, str]
    ) -> "FilterCollection":
        result = await self.store.get_user_filter(user_localpart, filter_id)
        return FilterCollection(result)

    def add_user_filter(self, user_localpart, user_filter):
    def add_user_filter(
        self, user_localpart: str, user_filter: JsonDict
    ) -> Awaitable[int]:
        self.check_valid_filter(user_filter)
        return self.store.add_user_filter(user_localpart, user_filter)

@@ -146,13 +164,13 @@ class Filtering:
    # replace_user_filter at some point? There's no REST API specified for
    # them however

    def check_valid_filter(self, user_filter_json):
    def check_valid_filter(self, user_filter_json: JsonDict) -> None:
        """Check if the provided filter is valid.

        This inspects all definitions contained within the filter.

        Args:
            user_filter_json(dict): The filter
            user_filter_json: The filter
        Raises:
            SynapseError: If the filter is not valid.
        """
@@ -167,8 +185,12 @@ class Filtering:
            raise SynapseError(400, str(e))


# Filters work across events, presence EDUs, and account data.
FilterEvent = TypeVar("FilterEvent", EventBase, UserPresenceState, JsonDict)


class FilterCollection:
    def __init__(self, filter_json):
    def __init__(self, filter_json: JsonDict):
        self._filter_json = filter_json

        room_filter_json = self._filter_json.get("room", {})
@@ -188,59 +210,61 @@ class FilterCollection:
        self.event_fields = filter_json.get("event_fields", [])
        self.event_format = filter_json.get("event_format", "client")

    def __repr__(self):
    def __repr__(self) -> str:
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)

    def get_filter_json(self):
    def get_filter_json(self) -> JsonDict:
        return self._filter_json

    def timeline_limit(self):
    def timeline_limit(self) -> int:
        return self._room_timeline_filter.limit()

    def presence_limit(self):
    def presence_limit(self) -> int:
        return self._presence_filter.limit()

    def ephemeral_limit(self):
    def ephemeral_limit(self) -> int:
        return self._room_ephemeral_filter.limit()

    def lazy_load_members(self):
    def lazy_load_members(self) -> bool:
        return self._room_state_filter.lazy_load_members()

    def include_redundant_members(self):
    def include_redundant_members(self) -> bool:
        return self._room_state_filter.include_redundant_members()

    def filter_presence(self, events):
    def filter_presence(
        self, events: Iterable[UserPresenceState]
    ) -> List[UserPresenceState]:
        return self._presence_filter.filter(events)

    def filter_account_data(self, events):
    def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
        return self._account_data.filter(events)

    def filter_room_state(self, events):
    def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]:
        return self._room_state_filter.filter(self._room_filter.filter(events))

    def filter_room_timeline(self, events):
    def filter_room_timeline(self, events: Iterable[EventBase]) -> List[EventBase]:
        return self._room_timeline_filter.filter(self._room_filter.filter(events))

    def filter_room_ephemeral(self, events):
    def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDict]:
        return self._room_ephemeral_filter.filter(self._room_filter.filter(events))

    def filter_room_account_data(self, events):
    def filter_room_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
        return self._room_account_data.filter(self._room_filter.filter(events))

    def blocks_all_presence(self):
    def blocks_all_presence(self) -> bool:
        return (
            self._presence_filter.filters_all_types()
            or self._presence_filter.filters_all_senders()
        )

    def blocks_all_room_ephemeral(self):
    def blocks_all_room_ephemeral(self) -> bool:
        return (
            self._room_ephemeral_filter.filters_all_types()
            or self._room_ephemeral_filter.filters_all_senders()
            or self._room_ephemeral_filter.filters_all_rooms()
        )

    def blocks_all_room_timeline(self):
    def blocks_all_room_timeline(self) -> bool:
        return (
            self._room_timeline_filter.filters_all_types()
            or self._room_timeline_filter.filters_all_senders()
@@ -249,7 +273,7 @@ class FilterCollection:


class Filter:
    def __init__(self, filter_json):
    def __init__(self, filter_json: JsonDict):
        self.filter_json = filter_json

        self.types = self.filter_json.get("types", None)
@@ -266,26 +290,26 @@ class Filter:
        self.labels = self.filter_json.get("org.matrix.labels", None)
        self.not_labels = self.filter_json.get("org.matrix.not_labels", [])

    def filters_all_types(self):
    def filters_all_types(self) -> bool:
        return "*" in self.not_types

    def filters_all_senders(self):
    def filters_all_senders(self) -> bool:
        return "*" in self.not_senders

    def filters_all_rooms(self):
    def filters_all_rooms(self) -> bool:
        return "*" in self.not_rooms

    def check(self, event):
    def check(self, event: FilterEvent) -> bool:
        """Checks whether the filter matches the given event.

        Returns:
            bool: True if the event matches
            True if the event matches
        """
        # We usually get the full "events" as dictionaries coming through,
        # except for presence which actually gets passed around as its own
        # namedtuple type.
        if isinstance(event, UserPresenceState):
            sender = event.user_id
            sender: Optional[str] = event.user_id
            room_id = None
            ev_type = "m.presence"
            contains_url = False
@@ -305,18 +329,25 @@ class Filter:
            room_id = event.get("room_id", None)
            ev_type = event.get("type", None)

            content = event.get("content", {})
            content = event.get("content") or {}
            # check if there is a string url field in the content for filtering purposes
            contains_url = isinstance(content.get("url"), str)
            labels = content.get(EventContentFields.LABELS, [])

        return self.check_fields(room_id, sender, ev_type, labels, contains_url)

    def check_fields(self, room_id, sender, event_type, labels, contains_url):
    def check_fields(
        self,
        room_id: Optional[str],
        sender: Optional[str],
        event_type: Optional[str],
        labels: Container[str],
        contains_url: bool,
    ) -> bool:
        """Checks whether the filter matches the given event fields.

        Returns:
            bool: True if the event fields match
            True if the event fields match
        """
        literal_keys = {
            "rooms": lambda v: room_id == v,
@@ -343,14 +374,14 @@ class Filter:

        return True

    def filter_rooms(self, room_ids):
    def filter_rooms(self, room_ids: Iterable[str]) -> Set[str]:
        """Apply the 'rooms' filter to a given list of rooms.

        Args:
            room_ids (list): A list of room_ids.
            room_ids: A list of room_ids.

        Returns:
            list: A list of room_ids that match the filter
            A list of room_ids that match the filter
        """
        room_ids = set(room_ids)

@@ -363,23 +394,23 @@ class Filter:

        return room_ids

    def filter(self, events):
    def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
        return list(filter(self.check, events))

    def limit(self):
    def limit(self) -> int:
        return self.filter_json.get("limit", 10)

    def lazy_load_members(self):
    def lazy_load_members(self) -> bool:
        return self.filter_json.get("lazy_load_members", False)

    def include_redundant_members(self):
    def include_redundant_members(self) -> bool:
        return self.filter_json.get("include_redundant_members", False)

    def with_room_ids(self, room_ids):
    def with_room_ids(self, room_ids: Iterable[str]) -> "Filter":
        """Returns a new filter with the given room IDs appended.

        Args:
            room_ids (iterable[unicode]): The room_ids to add
            room_ids: The room_ids to add

        Returns:
            filter: A new filter including the given rooms and the old
@@ -390,8 +421,8 @@ class Filter:
        return newFilter


def _matches_wildcard(actual_value, filter_value):
    if filter_value.endswith("*"):
def _matches_wildcard(actual_value: Optional[str], filter_value: str) -> bool:
    if filter_value.endswith("*") and isinstance(actual_value, str):
        type_prefix = filter_value[:-1]
        return actual_value.startswith(type_prefix)
    else:
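FilterEvent above is a value-constrained TypeVar, so Filter.filter() returns a list of exactly the event type it was given instead of a union. A standalone illustration of the same trick, with illustrative constraints:

```python
from typing import Callable, Iterable, List, TypeVar

T = TypeVar("T", str, dict)  # constrained, like FilterEvent

def keep(pred: Callable[[T], bool], items: Iterable[T]) -> List[T]:
    # The element type is preserved: str in, List[str] out.
    return [item for item in items if pred(item)]

messages: List[str] = keep(lambda s: s.startswith("m."), ["m.room.message", "x"])
```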
@@ -12,49 +12,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import namedtuple
from typing import Any, Optional

import attr

from synapse.api.constants import PresenceState
from synapse.types import JsonDict


class UserPresenceState(
    namedtuple(
        "UserPresenceState",
        (
            "user_id",
            "state",
            "last_active_ts",
            "last_federation_update_ts",
            "last_user_sync_ts",
            "status_msg",
            "currently_active",
        ),
    )
):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class UserPresenceState:
    """Represents the current presence state of the user.

    user_id (str)
    last_active (int): Time in msec that the user last interacted with server.
    last_federation_update (int): Time in msec since either a) we sent a presence
    user_id
    last_active: Time in msec that the user last interacted with server.
    last_federation_update: Time in msec since either a) we sent a presence
        update to other servers or b) we received a presence update, depending
        on if is a local user or not.
    last_user_sync (int): Time in msec that the user last *completed* a sync
    last_user_sync: Time in msec that the user last *completed* a sync
        (or event stream).
    status_msg (str): User set status message.
    status_msg: User set status message.
    """

    def as_dict(self):
        return dict(self._asdict())
    user_id: str
    state: str
    last_active_ts: int
    last_federation_update_ts: int
    last_user_sync_ts: int
    status_msg: Optional[str]
    currently_active: bool

    def as_dict(self) -> JsonDict:
        return attr.asdict(self)

    @staticmethod
    def from_dict(d):
    def from_dict(d: JsonDict) -> "UserPresenceState":
        return UserPresenceState(**d)

    def copy_and_replace(self, **kwargs):
        return self._replace(**kwargs)
    def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState":
        return attr.evolve(self, **kwargs)

    @classmethod
    def default(cls, user_id):
    def default(cls, user_id: str) -> "UserPresenceState":
        """Returns a default presence state."""
        return cls(
            user_id=user_id,
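A minimal sketch of the namedtuple-to-attrs migration pattern used above, with the attrs calls that replace the namedtuple ones (attr.asdict for _asdict, attr.evolve for _replace):

```python
import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class Point:
    x: int
    y: int

    def as_dict(self) -> dict:
        return attr.asdict(self)  # replaces namedtuple's _asdict()

p = Point(x=1, y=2)
p2 = attr.evolve(p, y=3)  # replaces _replace(); p itself is unchanged
assert (p.y, p2.y) == (2, 3)
```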
@@ -17,6 +17,7 @@ from collections import OrderedDict
from typing import Hashable, Optional, Tuple

from synapse.api.errors import LimitExceededError
from synapse.config.ratelimiting import RateLimitConfig
from synapse.storage.databases.main import DataStore
from synapse.types import Requester
from synapse.util import Clock
@@ -160,7 +161,7 @@ class Ratelimiter:

        return allowed, time_allowed

    def _prune_message_counts(self, time_now_s: float):
    def _prune_message_counts(self, time_now_s: float) -> None:
        """Remove message count entries that have not exceeded their defined
        rate_hz limit

@@ -189,7 +190,7 @@ class Ratelimiter:
        update: bool = True,
        n_actions: int = 1,
        _time_now_s: Optional[float] = None,
    ):
    ) -> None:
        """Checks if an action can be performed. If not, raises a LimitExceededError

        Checks if the user has ratelimiting disabled in the database by looking
@@ -233,3 +234,88 @@ class Ratelimiter:
            raise LimitExceededError(
                retry_after_ms=int(1000 * (time_allowed - time_now_s))
            )


class RequestRatelimiter:
    def __init__(
        self,
        store: DataStore,
        clock: Clock,
        rc_message: RateLimitConfig,
        rc_admin_redaction: Optional[RateLimitConfig],
    ):
        self.store = store
        self.clock = clock

        # The rate_hz and burst_count are overridden on a per-user basis
        self.request_ratelimiter = Ratelimiter(
            store=self.store, clock=self.clock, rate_hz=0, burst_count=0
        )
        self._rc_message = rc_message

        # Check whether ratelimiting room admin message redaction is enabled
        # by the presence of rate limits in the config
        if rc_admin_redaction:
            self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
                store=self.store,
                clock=self.clock,
                rate_hz=rc_admin_redaction.per_second,
                burst_count=rc_admin_redaction.burst_count,
            )
        else:
            self.admin_redaction_ratelimiter = None

    async def ratelimit(
        self,
        requester: Requester,
        update: bool = True,
        is_admin_redaction: bool = False,
    ) -> None:
        """Ratelimits requests.

        Args:
            requester
            update: Whether to record that a request is being processed.
                Set to False when doing multiple checks for one request (e.g.
                to check up front if we would reject the request), and set to
                True for the last call for a given request.
            is_admin_redaction: Whether this is a room admin/moderator
                redacting an event. If so then we may apply different
                ratelimits depending on config.

        Raises:
            LimitExceededError if the request should be ratelimited
        """
        user_id = requester.user.to_string()

        # The AS user itself is never rate limited.
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service is not None:
            return  # do not ratelimit app service senders

        messages_per_second = self._rc_message.per_second
        burst_count = self._rc_message.burst_count

        # Check if there is a per user override in the DB.
        override = await self.store.get_ratelimit_for_user(user_id)
        if override:
            # If overridden with a null Hz then ratelimiting has been entirely
            # disabled for the user
            if not override.messages_per_second:
                return

            messages_per_second = override.messages_per_second
            burst_count = override.burst_count

        if is_admin_redaction and self.admin_redaction_ratelimiter:
            # If we have separate config for admin redactions, use a separate
            # ratelimiter as to not have user_ids clash
            await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
        else:
            # Override rate and burst count per-user
            await self.request_ratelimiter.ratelimit(
                requester,
                rate_hz=messages_per_second,
                burst_count=burst_count,
                update=update,
            )
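As a rough mental model for the rate_hz/burst_count pair these constructors take: a token bucket with burst_count capacity, refilled at rate_hz tokens per second. This is an illustrative approximation, not Synapse's exact accounting (which lives in Ratelimiter above):

```python
import time

class TokenBucketSketch:
    def __init__(self, rate_hz: float, burst_count: int):
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        self.tokens = float(burst_count)  # start full: a burst is allowed
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill in proportion to elapsed time, capped at the burst size.
        self.tokens = min(
            float(self.burst_count), self.tokens + (now - self.last) * self.rate_hz
        )
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False
```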
@@ -19,6 +19,7 @@ from hashlib import sha256
from urllib.parse import urlencode

from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig

SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client"
CLIENT_API_PREFIX = "/_matrix/client"
@@ -34,11 +35,7 @@ LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"


class ConsentURIBuilder:
    def __init__(self, hs_config):
        """
        Args:
            hs_config (synapse.config.homeserver.HomeServerConfig):
        """
    def __init__(self, hs_config: HomeServerConfig):
        if hs_config.key.form_secret is None:
            raise ConfigError("form_secret not set in config")
        if hs_config.server.public_baseurl is None:
@@ -47,15 +44,15 @@ class ConsentURIBuilder:
        self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
        self._public_baseurl = hs_config.server.public_baseurl

    def build_user_consent_uri(self, user_id):
    def build_user_consent_uri(self, user_id: str) -> str:
        """Build a URI which we can give to the user to do their privacy
        policy consent

        Args:
            user_id (str): mxid or username of user
            user_id: mxid or username of user

        Returns
            (str) the URI where the user can do consent
            The URI where the user can do consent
        """
        mac = hmac.new(
            key=self._hmac_secret, msg=user_id.encode("ascii"), digestmod=sha256
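A sketch of the HMAC step shown above: the consent URI carries the mxid plus an HMAC-SHA256 over it, so the consent resource can verify the link was issued by this server. The query parameter names here are illustrative assumptions:

```python
import hmac
from hashlib import sha256
from urllib.parse import urlencode

secret = b"form_secret"            # hs_config.key.form_secret in Synapse
user_id = "@alice:example.com"
mac = hmac.new(key=secret, msg=user_id.encode("ascii"), digestmod=sha256).hexdigest()
uri = "https://example.com/_matrix/consent?" + urlencode({"u": user_id, "h": mac})
```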
@@ -31,6 +31,7 @@ import twisted
from twisted.internet import defer, error, reactor
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool

import synapse
from synapse.api.constants import MAX_PDU_SIZE
@@ -42,11 +43,13 @@ from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process
from synapse.util.gai_resolver import GAIResolver
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

@@ -86,11 +89,11 @@ def start_worker_reactor(appname, config, run_command=reactor.run):

    start_reactor(
        appname,
        soft_file_limit=config.soft_file_limit,
        gc_thresholds=config.gc_thresholds,
        soft_file_limit=config.server.soft_file_limit,
        gc_thresholds=config.server.gc_thresholds,
        pid_file=config.worker.worker_pid_file,
        daemonize=config.worker.worker_daemonize,
        print_pidfile=config.print_pidfile,
        print_pidfile=config.server.print_pidfile,
        logger=logger,
        run_command=run_command,
    )
@@ -293,15 +296,15 @@ def listen_ssl(
    return r


def refresh_certificate(hs):
def refresh_certificate(hs: "HomeServer"):
    """
    Refresh the TLS certificates that Synapse is using by re-reading them from
    disk and updating the TLS context factories to use them.
    """
    if not hs.config.has_tls_listener():
    if not hs.config.server.has_tls_listener():
        return

    hs.config.read_certificate_from_disk()
    hs.config.tls.read_certificate_from_disk()
    hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)

    if hs._listening_services:
@@ -337,9 +340,19 @@ async def start(hs: "HomeServer"):
    Args:
        hs: homeserver instance
    """
    reactor = hs.get_reactor()

    # We want to use a separate thread pool for the resolver so that large
    # numbers of DNS requests don't starve out other users of the threadpool.
    resolver_threadpool = ThreadPool(name="gai_resolver")
    resolver_threadpool.start()
    reactor.addSystemEventTrigger("during", "shutdown", resolver_threadpool.stop)
    reactor.installNameResolver(
        GAIResolver(reactor, getThreadPool=lambda: resolver_threadpool)
    )

    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):
        reactor = hs.get_reactor()

        @wrap_as_background_process("sighup")
        def handle_sighup(*args, **kwargs):
@@ -379,6 +392,7 @@ async def start(hs: "HomeServer"):
    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)
    load_legacy_password_auth_providers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)
@@ -417,11 +431,11 @@ async def start(hs: "HomeServer"):
    atexit.register(gc.freeze)


def setup_sentry(hs):
def setup_sentry(hs: "HomeServer"):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
        hs
    """

    if not hs.config.metrics.sentry_enabled:
@@ -447,7 +461,7 @@ def setup_sentry(hs):
        scope.set_tag("worker_name", name)


def setup_sdnotify(hs):
def setup_sdnotify(hs: "HomeServer"):
    """Adds process state hooks to tell systemd what we are up to."""

    # Tell systemd our state, if we're using it. This will silently fail if
@@ -39,6 +39,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.server import HomeServer
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string

@@ -58,6 +59,7 @@ class AdminCmdSlavedStore(
    SlavedEventStore,
    SlavedClientIpStore,
    BaseSlavedStore,
    RoomWorkerStore,
):
    pass

@@ -66,11 +68,11 @@ class AdminCmdServer(HomeServer):
    DATASTORE_CLASS = AdminCmdSlavedStore


async def export_data_command(hs, args):
async def export_data_command(hs: HomeServer, args):
    """Export data for a user.

    Args:
        hs (HomeServer)
        hs
        args (argparse.Namespace)
    """

@@ -185,24 +187,20 @@ def start(config_options):
    # a full worker config.
    config.worker.worker_app = "synapse.app.admin_cmd"

    if (
        not config.worker.worker_daemonize
        and not config.worker.worker_log_file
        and not config.worker.worker_log_config
    ):
    if not config.worker.worker_daemonize and not config.worker.worker_log_config:
        # Since we're meant to be run as a "command" let's not redirect stdio
        # unless we've actually set log config.
        config.logging.no_redirect_stdio = True

    # Explicitly disable background processes
    config.update_user_directory = False
    config.server.update_user_directory = False
    config.worker.run_background_tasks = False
    config.start_pushers = False
    config.pusher_shard_config.instances = []
    config.send_federation = False
    config.federation_shard_config.instances = []
    config.worker.start_pushers = False
    config.worker.pusher_shard_config.instances = []
    config.worker.send_federation = False
    config.worker.federation_shard_config.instances = []

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts

    ss = AdminCmdServer(
        config.server.server_name,
@@ -221,7 +219,7 @@ def start(config_options):

    async def run():
        with LoggingContext("command"):
            _base.start(ss)
            await _base.start(ss)
            await args.func(ss, args)

    _base.start_worker_reactor(

@@ -131,10 +131,10 @@ class KeyUploadServlet(RestServlet):

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
    def __init__(self, hs: HomeServer):
        """
        Args:
            hs (synapse.server.HomeServer): server
            hs: server
        """
        super().__init__()
        self.auth = hs.get_auth()
@@ -462,7 +462,7 @@ def start(config_options):
    # For other worker types we force this to off.
    config.server.update_user_directory = False

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:

@@ -234,7 +234,7 @@ class SynapseHomeServer(HomeServer):
        )

        if name in ["media", "federation", "client"]:
            if self.config.media.enable_media_repo:
            if self.config.server.enable_media_repo:
                media_repo = self.get_media_repository_resource()
                resources.update(
                    {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
@@ -248,7 +248,7 @@ class SynapseHomeServer(HomeServer):
            resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

        if name == "webclient":
            webclient_loc = self.config.web_client_location
            webclient_loc = self.config.server.web_client_location

            if webclient_loc is None:
                logger.warning(
@@ -343,7 +343,7 @@ def setup(config_options):
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts
    events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
@@ -412,7 +412,7 @@ def format_config_error(e: ConfigError) -> Iterator[str]:
        e = e.__cause__


def run(hs):
def run(hs: HomeServer):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:

@@ -439,11 +439,11 @@ def run(hs):

    _base.start_reactor(
        "synapse-homeserver",
        soft_file_limit=hs.config.soft_file_limit,
        gc_thresholds=hs.config.gc_thresholds,
        pid_file=hs.config.pid_file,
        daemonize=hs.config.daemonize,
        print_pidfile=hs.config.print_pidfile,
        soft_file_limit=hs.config.server.soft_file_limit,
        gc_thresholds=hs.config.server.gc_thresholds,
        pid_file=hs.config.server.pid_file,
        daemonize=hs.config.server.daemonize,
        print_pidfile=hs.config.server.print_pidfile,
        logger=logger,
    )


@@ -15,11 +15,15 @@ import logging
import math
import resource
import sys
from typing import TYPE_CHECKING

from prometheus_client import Gauge

from synapse.metrics.background_process_metrics import wrap_as_background_process

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger("synapse.app.homeserver")

# Contains the list of processes we will be monitoring
@@ -41,7 +45,7 @@ registered_reserved_users_mau_gauge = Gauge(


@wrap_as_background_process("phone_stats_home")
async def phone_stats_home(hs, stats, stats_process=_stats_process):
async def phone_stats_home(hs: "HomeServer", stats, stats_process=_stats_process):
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    uptime = int(now - hs.start_time)
@@ -74,7 +78,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
    store = hs.get_datastore()

    stats["homeserver"] = hs.config.server.server_name
    stats["server_context"] = hs.config.server_context
    stats["server_context"] = hs.config.server.server_context
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime
    version = sys.version_info
@@ -142,7 +146,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
        logger.warning("Error reporting stats: %s", e)


def start_phone_stats_home(hs):
def start_phone_stats_home(hs: "HomeServer"):
    """
    Start the background tasks which report phone home stats.
    """
@@ -171,7 +175,7 @@ def start_phone_stats_home(hs):
        current_mau_count_by_service = {}
        reserved_users = ()
        store = hs.get_datastore()
        if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
        if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:
            current_mau_count = await store.get_monthly_active_count()
            current_mau_count_by_service = (
                await store.get_monthly_active_count_by_service()
@@ -183,9 +187,9 @@ def start_phone_stats_home(hs):
            current_mau_by_service_gauge.labels(app_service).set(float(count))

        registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
        max_mau_gauge.set(float(hs.config.max_mau_value))
        max_mau_gauge.set(float(hs.config.server.max_mau_value))

    if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
    if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only:
        generate_monthly_active_users()
        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings

@@ -27,6 +27,7 @@ from synapse.util.caches.response_cache import ResponseCache

if TYPE_CHECKING:
    from synapse.appservice import ApplicationService
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -84,7 +85,7 @@ class ApplicationServiceApi(SimpleHttpClient):
    pushing.
    """

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.clock = hs.get_clock()
@@ -1,4 +1,5 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,25 +12,44 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig

if __name__ == "__main__":
    import sys

    from synapse.config.homeserver import HomeServerConfig
def main(args):
    action = args[1] if len(args) > 1 and args[1] == "read" else None
    # If we're reading a key in the config file, then `args[1]` will be `read` and `args[2]`
    # will be the key to read.
    # We'll want to rework this code if we want to support more actions than just `read`.
    load_config_args = args[3:] if action else args[1:]

    action = sys.argv[1]
    try:
        config = HomeServerConfig.load_config("", load_config_args)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    print("Config parses OK!")

    if action == "read":
        key = sys.argv[2]
        key = args[2]
        key_parts = key.split(".")

        value = config
        try:
            config = HomeServerConfig.load_config("", sys.argv[3:])
        except ConfigError as e:
            sys.stderr.write("\n" + str(e) + "\n")
            while len(key_parts):
                value = getattr(value, key_parts[0])
                key_parts.pop(0)

            print(f"\n{key}: {value}")
        except AttributeError:
            print(
                f"\nNo '{key}' key could be found in the provided configuration file."
            )
            sys.exit(1)

        print(getattr(config, key))
        sys.exit(0)
    else:
        sys.stderr.write("Unknown command %r\n" % (action,))
        sys.exit(1)

if __name__ == "__main__":
    main(sys.argv)
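A standalone sketch of the dotted-key lookup the new main() performs when invoked as `<prog> read <key> [config args]`; the classes are illustrative stand-ins for the real config tree:

```python
class ServerSection:
    server_name = "example.com"

class ConfigTree:
    server = ServerSection()

value = ConfigTree()
for part in "server.server_name".split("."):
    value = getattr(value, part)  # walk one attribute per dotted segment
print(value)  # -> example.com
```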
@@ -118,21 +118,6 @@ class Config:
        "synapse", "res/templates"
    )

    def __getattr__(self, item: str) -> Any:
        """
        Try and fetch a configuration option that does not exist on this class.

        This is so that existing configs that rely on `self.value`, where value
        is actually from a different config section, continue to work.
        """
        if item in ["generate_config_section", "read_config"]:
            raise AttributeError(item)

        if self.root is None:
            raise AttributeError(item)
        else:
            return self.root._get_unclassed_config(self.section, item)

    @staticmethod
    def parse_size(value):
        if isinstance(value, int):
@@ -289,7 +274,9 @@ class Config:
        env.filters.update(
            {
                "format_ts": _format_ts_filter,
                "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl),
                "mxc_to_http": _create_mxc_to_http_filter(
                    self.root.server.public_baseurl
                ),
            }
        )

@@ -311,8 +298,6 @@ class RootConfig:
    config_classes = []

    def __init__(self):
        self._configs = OrderedDict()

        for config_class in self.config_classes:
            if config_class.section is None:
                raise ValueError("%r requires a section name" % (config_class,))
@@ -321,42 +306,7 @@ class RootConfig:
                conf = config_class(self)
            except Exception as e:
                raise Exception("Failed making %s: %r" % (config_class.section, e))
            self._configs[config_class.section] = conf

    def __getattr__(self, item: str) -> Any:
        """
        Redirect lookups on this object either to config objects, or values on
        config objects, so that `config.tls.blah` works, as well as legacy uses
        of things like `config.server_name`. It will first look up the config
        section name, and then values on those config classes.
        """
        if item in self._configs.keys():
            return self._configs[item]

        return self._get_unclassed_config(None, item)

    def _get_unclassed_config(self, asking_section: Optional[str], item: str):
        """
        Fetch a config value from one of the instantiated config classes that
        has not been fetched directly.

        Args:
            asking_section: If this check is coming from a Config child, which
                one? This section will not be asked if it has the value.
            item: The configuration value key.

        Raises:
            AttributeError if no config classes have the config key. The body
            will contain what sections were checked.
        """
        for key, val in self._configs.items():
            if key == asking_section:
                continue

            if item in dir(val):
                return getattr(val, item)

        raise AttributeError(item, "not found in %s" % (list(self._configs.keys()),))
            setattr(self, config_class.section, conf)

    def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[str, Any]:
        """
@@ -373,9 +323,11 @@ class RootConfig:
        """
        res = OrderedDict()

        for name, config in self._configs.items():
        for config_class in self.config_classes:
            config = getattr(self, config_class.section)

            if hasattr(config, func_name):
                res[name] = getattr(config, func_name)(*args, **kwargs)
            res[config_class.section] = getattr(config, func_name)(*args, **kwargs)

        return res
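A sketch of the access pattern this migration enforces: config values now live on named section objects set as plain attributes of the root config, so lookups are explicit (config.server.server_name) instead of being routed through a __getattr__ fallback. Illustrative stand-in classes:

```python
class ServerConfig:
    section = "server"

    def __init__(self):
        self.server_name = "example.com"

class RootConfig:
    config_classes = [ServerConfig]

    def __init__(self):
        for config_class in self.config_classes:
            # Sections become ordinary attributes; no dynamic lookup needed.
            setattr(self, config_class.section, config_class())

config = RootConfig()
print(config.server.server_name)  # explicit section access
# config.server_name would now raise AttributeError instead of guessing.
```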
@@ -26,6 +26,7 @@ from synapse.config import (
    redis,
    registration,
    repository,
    retention,
    room_directory,
    saml2,
    server,
@@ -91,6 +92,7 @@ class RootConfig:
    modules: modules.ModulesConfig
    caches: cache.CacheConfig
    federation: federation.FederationConfig
    retention: retention.RetentionConfig

    config_classes: List = ...
    def __init__(self) -> None: ...

@@ -76,7 +76,7 @@ class AccountValidityConfig(Config):
        )

        if self.account_validity_renew_by_email_enabled:
            if not self.public_baseurl:
            if not self.root.server.public_baseurl:
                raise ConfigError("Can't send renewal emails without 'public_baseurl'")

            # Load account validity templates.

@@ -37,7 +37,7 @@ class CasConfig(Config):

            # The public baseurl is required because it is used by the redirect
            # template.
            public_baseurl = self.public_baseurl
            public_baseurl = self.root.server.public_baseurl
            if not public_baseurl:
                raise ConfigError("cas_config requires a public_baseurl to be set")


@@ -19,7 +19,6 @@ import email.utils
import logging
import os
from enum import Enum
from typing import Optional

import attr

@@ -135,7 +134,7 @@ class EmailConfig(Config):
            # msisdn is currently always remote while Synapse does not support any method of
            # sending SMS messages
            ThreepidBehaviour.REMOTE
            if self.account_threepid_delegate_email
            if self.root.registration.account_threepid_delegate_email
            else ThreepidBehaviour.LOCAL
        )
        # Prior to Synapse v1.4.0, there was another option that defined whether Synapse would
@@ -144,7 +143,7 @@ class EmailConfig(Config):
        # identity server in the process.
        self.using_identity_server_from_trusted_list = False
        if (
            not self.account_threepid_delegate_email
            not self.root.registration.account_threepid_delegate_email
            and config.get("trust_identity_server_for_password_resets", False) is True
        ):
            # Use the first entry in self.trusted_third_party_id_servers instead
@@ -156,7 +155,7 @@ class EmailConfig(Config):

            # trusted_third_party_id_servers does not contain a scheme whereas
            # account_threepid_delegate_email is expected to. Presume https
            self.account_threepid_delegate_email: Optional[str] = (
            self.root.registration.account_threepid_delegate_email = (
                "https://" + first_trusted_identity_server
            )
            self.using_identity_server_from_trusted_list = True
@@ -335,7 +334,7 @@ class EmailConfig(Config):
            "client_base_url", email_config.get("riot_base_url", None)
        )

        if self.account_validity_renew_by_email_enabled:
        if self.root.account_validity.account_validity_renew_by_email_enabled:
            expiry_template_html = email_config.get(
                "expiry_template_html", "notice_expiry.html"
            )

@@ -24,6 +24,11 @@ class ExperimentalConfig(Config):
    def read_config(self, config: JsonDict, **kwargs):
        experimental = config.get("experimental_features") or {}

        # Whether to enable experimental MSC1849 (aka relations) support
        self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
        # MSC3440 (thread relation)
        self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)

        # MSC3026 (busy presence state)
        self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)


@@ -38,6 +38,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
from .retention import RetentionConfig
from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
@@ -59,6 +60,7 @@ class HomeServerConfig(RootConfig):
    config_classes = [
        ModulesConfig,
        ServerConfig,
        RetentionConfig,
        TlsConfig,
        FederationConfig,
        CacheConfig,

@@ -145,11 +145,13 @@ class KeyConfig(Config):

        # list of TrustedKeyServer objects
        self.key_servers = list(
            _parse_key_servers(key_servers, self.federation_verify_certificates)
            _parse_key_servers(
                key_servers, self.root.tls.federation_verify_certificates
            )
        )

        self.macaroon_secret_key = config.get(
            "macaroon_secret_key", self.registration_shared_secret
            "macaroon_secret_key", self.root.registration.registration_shared_secret
        )

        if not self.macaroon_secret_key:

@@ -18,6 +18,7 @@ import os
import sys
import threading
from string import Template
from typing import TYPE_CHECKING

import yaml
from zope.interface import implementer
@@ -38,6 +39,9 @@ from synapse.util.versionstring import get_version_string

from ._base import Config, ConfigError

if TYPE_CHECKING:
    from synapse.server import HomeServer

DEFAULT_LOG_CONFIG = Template(
    """\
# Log configuration for Synapse.
@@ -306,7 +310,10 @@ def _reload_logging_config(log_config_path):


def setup_logging(
    hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
    hs: "HomeServer",
    config,
    use_worker_options=False,
    logBeginner: LogBeginner = globalLogBeginner,
) -> None:
    """
    Set up the logging subsystem.

@@ -58,7 +58,7 @@ class OIDCConfig(Config):
                "Multiple OIDC providers have the idp_id %r." % idp_id
            )

        public_baseurl = self.public_baseurl
        public_baseurl = self.root.server.public_baseurl
        if public_baseurl is None:
            raise ConfigError("oidc_config requires a public_baseurl to be set")
        self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback"

@@ -25,6 +25,29 @@ class PasswordAuthProviderConfig(Config):
    section = "authproviders"

    def read_config(self, config, **kwargs):
        """Parses the old password auth providers config. The config format looks like this:

        password_providers:
           # Example config for an LDAP auth provider
           - module: "ldap_auth_provider.LdapAuthProvider"
             config:
               enabled: true
               uri: "ldap://ldap.example.com:389"
               start_tls: true
               base: "ou=users,dc=example,dc=com"
               attributes:
                  uid: "cn"
                  mail: "email"
                  name: "givenName"
               #bind_dn:
               #bind_password:
               #filter: "(objectClass=posixAccount)"

        We expect admins to use modules for this feature (which is why it doesn't appear
        in the sample config file), but we want to keep support for it around for a bit
        for backwards compatibility.
        """

        self.password_providers: List[Tuple[Type, Any]] = []
        providers = []

@@ -49,33 +72,3 @@ class PasswordAuthProviderConfig(Config):
            )

            self.password_providers.append((provider_class, provider_config))

    def generate_config_section(self, **kwargs):
        return """\
        # Password providers allow homeserver administrators to integrate
        # their Synapse installation with existing authentication methods
        # ex. LDAP, external tokens, etc.
        #
        # For more information and known implementations, please see
        # https://matrix-org.github.io/synapse/latest/password_auth_providers.html
        #
        # Note: instances wishing to use SAML or CAS authentication should
        # instead use the `saml2_config` or `cas_config` options,
        # respectively.
        #
        password_providers:
        #    # Example config for an LDAP auth provider
        #    - module: "ldap_auth_provider.LdapAuthProvider"
        #      config:
        #        enabled: true
        #        uri: "ldap://ldap.example.com:389"
        #        start_tls: true
        #        base: "ou=users,dc=example,dc=com"
        #        attributes:
        #           uid: "cn"
        #           mail: "email"
        #           name: "givenName"
        #        #bind_dn:
        #        #bind_password:
        #        #filter: "(objectClass=posixAccount)"
        """

@@ -45,7 +45,10 @@ class RegistrationConfig(Config):
        account_threepid_delegates = config.get("account_threepid_delegates") or {}
        self.account_threepid_delegate_email = account_threepid_delegates.get("email")
        self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
        if self.account_threepid_delegate_msisdn and not self.public_baseurl:
        if (
            self.account_threepid_delegate_msisdn
            and not self.root.server.public_baseurl
        ):
            raise ConfigError(
                "The configuration option `public_baseurl` is required if "
                "`account_threepid_delegate.msisdn` is set, such that "
@@ -85,7 +88,7 @@ class RegistrationConfig(Config):
        if mxid_localpart:
|
||||
# Convert the localpart to a full mxid.
|
||||
self.auto_join_user_id = UserID(
|
||||
mxid_localpart, self.server_name
|
||||
mxid_localpart, self.root.server.server_name
|
||||
).to_string()
|
||||
|
||||
if self.autocreate_auto_join_rooms:
|
||||
|
||||
@@ -94,7 +94,7 @@ class ContentRepositoryConfig(Config):
|
||||
# Only enable the media repo if either the media repo is enabled or the
|
||||
# current worker app is the media repo.
|
||||
if (
|
||||
self.enable_media_repo is False
|
||||
self.root.server.enable_media_repo is False
|
||||
and config.get("worker_app") != "synapse.app.media_repository"
|
||||
):
|
||||
self.can_load_media_repo = False
|
||||
|
||||
226
synapse/config/retention.py
Normal file
226
synapse/config/retention.py
Normal file
@@ -0,0 +1,226 @@
|
||||
# Copyright 2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import List, Optional
|
||||
|
||||
import attr
|
||||
|
||||
from synapse.config._base import Config, ConfigError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class RetentionPurgeJob:
|
||||
"""Object describing the configuration of the manhole"""
|
||||
|
||||
interval: int
|
||||
shortest_max_lifetime: Optional[int]
|
||||
longest_max_lifetime: Optional[int]
|
||||
|
||||
|
||||
class RetentionConfig(Config):
|
||||
section = "retention"
|
||||
|
||||
def read_config(self, config, **kwargs):
|
||||
retention_config = config.get("retention")
|
||||
if retention_config is None:
|
||||
retention_config = {}
|
||||
|
||||
self.retention_enabled = retention_config.get("enabled", False)
|
||||
|
||||
retention_default_policy = retention_config.get("default_policy")
|
||||
|
||||
if retention_default_policy is not None:
|
||||
self.retention_default_min_lifetime = retention_default_policy.get(
|
||||
"min_lifetime"
|
||||
)
|
||||
if self.retention_default_min_lifetime is not None:
|
||||
self.retention_default_min_lifetime = self.parse_duration(
|
||||
self.retention_default_min_lifetime
|
||||
)
|
||||
|
||||
self.retention_default_max_lifetime = retention_default_policy.get(
|
||||
"max_lifetime"
|
||||
)
|
||||
if self.retention_default_max_lifetime is not None:
|
||||
self.retention_default_max_lifetime = self.parse_duration(
|
||||
self.retention_default_max_lifetime
|
||||
)
|
||||
|
||||
if (
|
||||
self.retention_default_min_lifetime is not None
|
||||
and self.retention_default_max_lifetime is not None
|
||||
and (
|
||||
self.retention_default_min_lifetime
|
||||
> self.retention_default_max_lifetime
|
||||
)
|
||||
):
|
||||
raise ConfigError(
|
||||
"The default retention policy's 'min_lifetime' can not be greater"
|
||||
" than its 'max_lifetime'"
|
||||
)
|
||||
else:
|
||||
self.retention_default_min_lifetime = None
|
||||
self.retention_default_max_lifetime = None
|
||||
|
||||
if self.retention_enabled:
|
||||
logger.info(
|
||||
"Message retention policies support enabled with the following default"
|
||||
" policy: min_lifetime = %s ; max_lifetime = %s",
|
||||
self.retention_default_min_lifetime,
|
||||
self.retention_default_max_lifetime,
|
||||
)
|
||||
|
||||
self.retention_allowed_lifetime_min = retention_config.get(
|
||||
"allowed_lifetime_min"
|
||||
)
|
||||
if self.retention_allowed_lifetime_min is not None:
|
||||
self.retention_allowed_lifetime_min = self.parse_duration(
|
||||
self.retention_allowed_lifetime_min
|
||||
)
|
||||
|
||||
self.retention_allowed_lifetime_max = retention_config.get(
|
||||
"allowed_lifetime_max"
|
||||
)
|
||||
if self.retention_allowed_lifetime_max is not None:
|
||||
self.retention_allowed_lifetime_max = self.parse_duration(
|
||||
self.retention_allowed_lifetime_max
|
||||
)
|
||||
|
||||
if (
|
||||
self.retention_allowed_lifetime_min is not None
|
||||
and self.retention_allowed_lifetime_max is not None
|
||||
and self.retention_allowed_lifetime_min
|
||||
> self.retention_allowed_lifetime_max
|
||||
):
|
||||
raise ConfigError(
|
||||
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
|
||||
" greater than 'allowed_lifetime_max'"
|
||||
)
|
||||
|
||||
self.retention_purge_jobs: List[RetentionPurgeJob] = []
|
||||
for purge_job_config in retention_config.get("purge_jobs", []):
|
||||
interval_config = purge_job_config.get("interval")
|
||||
|
||||
if interval_config is None:
|
||||
raise ConfigError(
|
||||
"A retention policy's purge jobs configuration must have the"
|
||||
" 'interval' key set."
|
||||
)
|
||||
|
||||
interval = self.parse_duration(interval_config)
|
||||
|
||||
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
|
||||
|
||||
if shortest_max_lifetime is not None:
|
||||
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
|
||||
|
||||
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
|
||||
|
||||
if longest_max_lifetime is not None:
|
||||
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
|
||||
|
||||
if (
|
||||
shortest_max_lifetime is not None
|
||||
and longest_max_lifetime is not None
|
||||
and shortest_max_lifetime > longest_max_lifetime
|
||||
):
|
||||
raise ConfigError(
|
||||
"A retention policy's purge jobs configuration's"
|
||||
" 'shortest_max_lifetime' value can not be greater than its"
|
||||
" 'longest_max_lifetime' value."
|
||||
)
|
||||
|
||||
self.retention_purge_jobs.append(
|
||||
RetentionPurgeJob(interval, shortest_max_lifetime, longest_max_lifetime)
|
||||
)
|
||||
|
||||
if not self.retention_purge_jobs:
|
||||
self.retention_purge_jobs = [
|
||||
RetentionPurgeJob(self.parse_duration("1d"), None, None)
|
||||
]
|
||||
|
||||
def generate_config_section(self, config_dir_path, server_name, **kwargs):
|
||||
return """\
|
||||
# Message retention policy at the server level.
|
||||
#
|
||||
# Room admins and mods can define a retention period for their rooms using the
|
||||
# 'm.room.retention' state event, and server admins can cap this period by setting
|
||||
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
|
||||
#
|
||||
# If this feature is enabled, Synapse will regularly look for and purge events
|
||||
# which are older than the room's maximum retention period. Synapse will also
|
||||
# filter events received over federation so that events that should have been
|
||||
# purged are ignored and not stored again.
|
||||
#
|
||||
retention:
|
||||
# The message retention policies feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# Default retention policy. If set, Synapse will apply it to rooms that lack the
|
||||
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
|
||||
# matter much because Synapse doesn't take it into account yet.
|
||||
#
|
||||
#default_policy:
|
||||
# min_lifetime: 1d
|
||||
# max_lifetime: 1y
|
||||
|
||||
# Retention policy limits. If set, and the state of a room contains a
|
||||
# 'm.room.retention' event in its state which contains a 'min_lifetime' or a
|
||||
# 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
|
||||
# to these limits when running purge jobs.
|
||||
#
|
||||
#allowed_lifetime_min: 1d
|
||||
#allowed_lifetime_max: 1y
|
||||
|
||||
# Server admins can define the settings of the background jobs purging the
|
||||
# events which lifetime has expired under the 'purge_jobs' section.
|
||||
#
|
||||
# If no configuration is provided, a single job will be set up to delete expired
|
||||
# events in every room daily.
|
||||
#
|
||||
# Each job's configuration defines which range of message lifetimes the job
|
||||
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
|
||||
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
|
||||
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
|
||||
# lower than or equal to 3 days. Both the minimum and the maximum value of a
|
||||
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
|
||||
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
|
||||
# which 'max_lifetime' is lower than or equal to three days.
|
||||
#
|
||||
# The rationale for this per-job configuration is that some rooms might have a
|
||||
# retention policy with a low 'max_lifetime', where history needs to be purged
|
||||
# of outdated messages on a more frequent basis than for the rest of the rooms
|
||||
# (e.g. every 12h), but not want that purge to be performed by a job that's
|
||||
# iterating over every room it knows, which could be heavy on the server.
|
||||
#
|
||||
# If any purge job is configured, it is strongly recommended to have at least
|
||||
# a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
|
||||
# set, or one job without 'shortest_max_lifetime' and one job without
|
||||
# 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
|
||||
# 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
|
||||
# room's policy to these values is done after the policies are retrieved from
|
||||
# Synapse's database (which is done using the range specified in a purge job's
|
||||
# configuration).
|
||||
#
|
||||
#purge_jobs:
|
||||
# - longest_max_lifetime: 3d
|
||||
# interval: 12h
|
||||
# - shortest_max_lifetime: 3d
|
||||
# interval: 1d
|
||||
"""
|
||||
@@ -199,7 +199,7 @@ class SAML2Config(Config):
|
||||
"""
|
||||
import saml2
|
||||
|
||||
public_baseurl = self.public_baseurl
|
||||
public_baseurl = self.root.server.public_baseurl
|
||||
if public_baseurl is None:
|
||||
raise ConfigError("saml2_config requires a public_baseurl to be set")
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2017-2018 New Vector Ltd
|
||||
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -19,7 +17,7 @@ import logging
|
||||
import os.path
|
||||
import re
|
||||
from textwrap import indent
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
|
||||
|
||||
import attr
|
||||
import yaml
|
||||
@@ -184,49 +182,65 @@ KNOWN_RESOURCES = {
|
||||
|
||||
@attr.s(frozen=True)
|
||||
class HttpResourceConfig:
|
||||
names = attr.ib(
|
||||
type=List[str],
|
||||
names: List[str] = attr.ib(
|
||||
factory=list,
|
||||
validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), # type: ignore
|
||||
)
|
||||
compress = attr.ib(
|
||||
type=bool,
|
||||
compress: bool = attr.ib(
|
||||
default=False,
|
||||
validator=attr.validators.optional(attr.validators.instance_of(bool)), # type: ignore[arg-type]
|
||||
)
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class HttpListenerConfig:
|
||||
"""Object describing the http-specific parts of the config of a listener"""
|
||||
|
||||
x_forwarded = attr.ib(type=bool, default=False)
|
||||
resources = attr.ib(type=List[HttpResourceConfig], factory=list)
|
||||
additional_resources = attr.ib(type=Dict[str, dict], factory=dict)
|
||||
tag = attr.ib(type=str, default=None)
|
||||
x_forwarded: bool = False
|
||||
resources: List[HttpResourceConfig] = attr.ib(factory=list)
|
||||
additional_resources: Dict[str, dict] = attr.ib(factory=dict)
|
||||
tag: Optional[str] = None
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class ListenerConfig:
|
||||
"""Object describing the configuration of a single listener."""
|
||||
|
||||
port = attr.ib(type=int, validator=attr.validators.instance_of(int))
|
||||
bind_addresses = attr.ib(type=List[str])
|
||||
type = attr.ib(type=str, validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
|
||||
tls = attr.ib(type=bool, default=False)
|
||||
port: int = attr.ib(validator=attr.validators.instance_of(int))
|
||||
bind_addresses: List[str]
|
||||
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
|
||||
tls: bool = False
|
||||
|
||||
# http_options is only populated if type=http
|
||||
http_options = attr.ib(type=Optional[HttpListenerConfig], default=None)
|
||||
http_options: Optional[HttpListenerConfig] = None
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class ManholeConfig:
|
||||
"""Object describing the configuration of the manhole"""
|
||||
|
||||
username = attr.ib(type=str, validator=attr.validators.instance_of(str))
|
||||
password = attr.ib(type=str, validator=attr.validators.instance_of(str))
|
||||
priv_key = attr.ib(type=Optional[Key])
|
||||
pub_key = attr.ib(type=Optional[Key])
|
||||
username: str = attr.ib(validator=attr.validators.instance_of(str))
|
||||
password: str = attr.ib(validator=attr.validators.instance_of(str))
|
||||
priv_key: Optional[Key]
|
||||
pub_key: Optional[Key]
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
class LimitRemoteRoomsConfig:
|
||||
enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
|
||||
complexity: Union[float, int] = attr.ib(
|
||||
validator=attr.validators.instance_of(
|
||||
(float, int) # type: ignore[arg-type] # noqa
|
||||
),
|
||||
default=1.0,
|
||||
)
|
||||
complexity_error: str = attr.ib(
|
||||
validator=attr.validators.instance_of(str),
|
||||
default=ROOM_COMPLEXITY_TOO_GREAT,
|
||||
)
|
||||
admins_can_join: bool = attr.ib(
|
||||
validator=attr.validators.instance_of(bool), default=False
|
||||
)
|
||||
|
||||
|
||||
class ServerConfig(Config):
|
||||
@@ -353,11 +367,6 @@ class ServerConfig(Config):
|
||||
# (other than those sent by local server admins)
|
||||
self.block_non_admin_invites = config.get("block_non_admin_invites", False)
|
||||
|
||||
# Whether to enable experimental MSC1849 (aka relations) support
|
||||
self.experimental_msc1849_support_enabled = config.get(
|
||||
"experimental_msc1849_support_enabled", True
|
||||
)
|
||||
|
||||
# Options to control access by tracking MAU
|
||||
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
|
||||
self.max_mau_value = 0
|
||||
@@ -443,132 +452,6 @@ class ServerConfig(Config):
|
||||
# events with profile information that differ from the target's global profile.
|
||||
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
|
||||
|
||||
retention_config = config.get("retention")
|
||||
if retention_config is None:
|
||||
retention_config = {}
|
||||
|
||||
self.retention_enabled = retention_config.get("enabled", False)
|
||||
|
||||
retention_default_policy = retention_config.get("default_policy")
|
||||
|
||||
if retention_default_policy is not None:
|
||||
self.retention_default_min_lifetime = retention_default_policy.get(
|
||||
"min_lifetime"
|
||||
)
|
||||
if self.retention_default_min_lifetime is not None:
|
||||
self.retention_default_min_lifetime = self.parse_duration(
|
||||
self.retention_default_min_lifetime
|
||||
)
|
||||
|
||||
self.retention_default_max_lifetime = retention_default_policy.get(
|
||||
"max_lifetime"
|
||||
)
|
||||
if self.retention_default_max_lifetime is not None:
|
||||
self.retention_default_max_lifetime = self.parse_duration(
|
||||
self.retention_default_max_lifetime
|
||||
)
|
||||
|
||||
if (
|
||||
self.retention_default_min_lifetime is not None
|
||||
and self.retention_default_max_lifetime is not None
|
||||
and (
|
||||
self.retention_default_min_lifetime
|
||||
> self.retention_default_max_lifetime
|
||||
)
|
||||
):
|
||||
raise ConfigError(
|
||||
"The default retention policy's 'min_lifetime' can not be greater"
|
||||
" than its 'max_lifetime'"
|
||||
)
|
||||
else:
|
||||
self.retention_default_min_lifetime = None
|
||||
self.retention_default_max_lifetime = None
|
||||
|
||||
if self.retention_enabled:
|
||||
logger.info(
|
||||
"Message retention policies support enabled with the following default"
|
||||
" policy: min_lifetime = %s ; max_lifetime = %s",
|
||||
self.retention_default_min_lifetime,
|
||||
self.retention_default_max_lifetime,
|
||||
)
|
||||
|
||||
self.retention_allowed_lifetime_min = retention_config.get(
|
||||
"allowed_lifetime_min"
|
||||
)
|
||||
if self.retention_allowed_lifetime_min is not None:
|
||||
self.retention_allowed_lifetime_min = self.parse_duration(
|
||||
self.retention_allowed_lifetime_min
|
||||
)
|
||||
|
||||
self.retention_allowed_lifetime_max = retention_config.get(
|
||||
"allowed_lifetime_max"
|
||||
)
|
||||
if self.retention_allowed_lifetime_max is not None:
|
||||
self.retention_allowed_lifetime_max = self.parse_duration(
|
||||
self.retention_allowed_lifetime_max
|
||||
)
|
||||
|
||||
if (
|
||||
self.retention_allowed_lifetime_min is not None
|
||||
and self.retention_allowed_lifetime_max is not None
|
||||
and self.retention_allowed_lifetime_min
|
||||
> self.retention_allowed_lifetime_max
|
||||
):
|
||||
raise ConfigError(
|
||||
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
|
||||
" greater than 'allowed_lifetime_max'"
|
||||
)
|
||||
|
||||
self.retention_purge_jobs: List[Dict[str, Optional[int]]] = []
|
||||
for purge_job_config in retention_config.get("purge_jobs", []):
|
||||
interval_config = purge_job_config.get("interval")
|
||||
|
||||
if interval_config is None:
|
||||
raise ConfigError(
|
||||
"A retention policy's purge jobs configuration must have the"
|
||||
" 'interval' key set."
|
||||
)
|
||||
|
||||
interval = self.parse_duration(interval_config)
|
||||
|
||||
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
|
||||
|
||||
if shortest_max_lifetime is not None:
|
||||
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
|
||||
|
||||
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
|
||||
|
||||
if longest_max_lifetime is not None:
|
||||
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
|
||||
|
||||
if (
|
||||
shortest_max_lifetime is not None
|
||||
and longest_max_lifetime is not None
|
||||
and shortest_max_lifetime > longest_max_lifetime
|
||||
):
|
||||
raise ConfigError(
|
||||
"A retention policy's purge jobs configuration's"
|
||||
" 'shortest_max_lifetime' value can not be greater than its"
|
||||
" 'longest_max_lifetime' value."
|
||||
)
|
||||
|
||||
self.retention_purge_jobs.append(
|
||||
{
|
||||
"interval": interval,
|
||||
"shortest_max_lifetime": shortest_max_lifetime,
|
||||
"longest_max_lifetime": longest_max_lifetime,
|
||||
}
|
||||
)
|
||||
|
||||
if not self.retention_purge_jobs:
|
||||
self.retention_purge_jobs = [
|
||||
{
|
||||
"interval": self.parse_duration("1d"),
|
||||
"shortest_max_lifetime": None,
|
||||
"longest_max_lifetime": None,
|
||||
}
|
||||
]
|
||||
|
||||
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
|
||||
|
||||
# no_tls is not really supported any more, but let's grandfather it in
|
||||
@@ -591,25 +474,6 @@ class ServerConfig(Config):
|
||||
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
||||
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
|
||||
|
||||
@attr.s
|
||||
class LimitRemoteRoomsConfig:
|
||||
enabled = attr.ib(
|
||||
validator=attr.validators.instance_of(bool), default=False
|
||||
)
|
||||
complexity = attr.ib(
|
||||
validator=attr.validators.instance_of(
|
||||
(float, int) # type: ignore[arg-type] # noqa
|
||||
),
|
||||
default=1.0,
|
||||
)
|
||||
complexity_error = attr.ib(
|
||||
validator=attr.validators.instance_of(str),
|
||||
default=ROOM_COMPLEXITY_TOO_GREAT,
|
||||
)
|
||||
admins_can_join = attr.ib(
|
||||
validator=attr.validators.instance_of(bool), default=False
|
||||
)
|
||||
|
||||
self.limit_remote_rooms = LimitRemoteRoomsConfig(
|
||||
**(config.get("limit_remote_rooms") or {})
|
||||
)
|
||||
@@ -1259,75 +1123,6 @@ class ServerConfig(Config):
|
||||
#
|
||||
#user_ips_max_age: 14d
|
||||
|
||||
# Message retention policy at the server level.
|
||||
#
|
||||
# Room admins and mods can define a retention period for their rooms using the
|
||||
# 'm.room.retention' state event, and server admins can cap this period by setting
|
||||
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
|
||||
#
|
||||
# If this feature is enabled, Synapse will regularly look for and purge events
|
||||
# which are older than the room's maximum retention period. Synapse will also
|
||||
# filter events received over federation so that events that should have been
|
||||
# purged are ignored and not stored again.
|
||||
#
|
||||
retention:
|
||||
# The message retention policies feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# Default retention policy. If set, Synapse will apply it to rooms that lack the
|
||||
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
|
||||
# matter much because Synapse doesn't take it into account yet.
|
||||
#
|
||||
#default_policy:
|
||||
# min_lifetime: 1d
|
||||
# max_lifetime: 1y
|
||||
|
||||
# Retention policy limits. If set, and the state of a room contains a
|
||||
# 'm.room.retention' event in its state which contains a 'min_lifetime' or a
|
||||
# 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
|
||||
# to these limits when running purge jobs.
|
||||
#
|
||||
#allowed_lifetime_min: 1d
|
||||
#allowed_lifetime_max: 1y
|
||||
|
||||
# Server admins can define the settings of the background jobs purging the
|
||||
# events which lifetime has expired under the 'purge_jobs' section.
|
||||
#
|
||||
# If no configuration is provided, a single job will be set up to delete expired
|
||||
# events in every room daily.
|
||||
#
|
||||
# Each job's configuration defines which range of message lifetimes the job
|
||||
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
|
||||
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
|
||||
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
|
||||
# lower than or equal to 3 days. Both the minimum and the maximum value of a
|
||||
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
|
||||
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
|
||||
# which 'max_lifetime' is lower than or equal to three days.
|
||||
#
|
||||
# The rationale for this per-job configuration is that some rooms might have a
|
||||
# retention policy with a low 'max_lifetime', where history needs to be purged
|
||||
# of outdated messages on a more frequent basis than for the rest of the rooms
|
||||
# (e.g. every 12h), but not want that purge to be performed by a job that's
|
||||
# iterating over every room it knows, which could be heavy on the server.
|
||||
#
|
||||
# If any purge job is configured, it is strongly recommended to have at least
|
||||
# a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
|
||||
# set, or one job without 'shortest_max_lifetime' and one job without
|
||||
# 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
|
||||
# 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
|
||||
# room's policy to these values is done after the policies are retrieved from
|
||||
# Synapse's database (which is done using the range specified in a purge job's
|
||||
# configuration).
|
||||
#
|
||||
#purge_jobs:
|
||||
# - longest_max_lifetime: 3d
|
||||
# interval: 12h
|
||||
# - shortest_max_lifetime: 3d
|
||||
# interval: 1d
|
||||
|
||||
# Inhibits the /requestToken endpoints from returning an error that might leak
|
||||
# information about whether an e-mail address is in use or not on this
|
||||
# homeserver.
|
||||
|
||||
@@ -73,7 +73,9 @@ class ServerNoticesConfig(Config):
|
||||
return
|
||||
|
||||
mxid_localpart = c["system_mxid_localpart"]
|
||||
self.server_notices_mxid = UserID(mxid_localpart, self.server_name).to_string()
|
||||
self.server_notices_mxid = UserID(
|
||||
mxid_localpart, self.root.server.server_name
|
||||
).to_string()
|
||||
self.server_notices_mxid_display_name = c.get("system_mxid_display_name", None)
|
||||
self.server_notices_mxid_avatar_url = c.get("system_mxid_avatar_url", None)
|
||||
# todo: i18n
|
||||
|
||||
@@ -103,8 +103,10 @@ class SSOConfig(Config):
|
||||
# the client's.
|
||||
# public_baseurl is an optional setting, so we only add the fallback's URL to the
|
||||
# list if it's provided (because we can't figure out what that URL is otherwise).
|
||||
if self.public_baseurl:
|
||||
login_fallback_url = self.public_baseurl + "_matrix/static/client/login"
|
||||
if self.root.server.public_baseurl:
|
||||
login_fallback_url = (
|
||||
self.root.server.public_baseurl + "_matrix/static/client/login"
|
||||
)
|
||||
self.sso_client_whitelist.append(login_fallback_url)
|
||||
|
||||
def generate_config_section(self, **kwargs):
|
||||
|
||||
@@ -172,9 +172,12 @@ class TlsConfig(Config):
|
||||
)
|
||||
|
||||
# YYYYMMDDhhmmssZ -- in UTC
|
||||
expires_on = datetime.strptime(
|
||||
tls_certificate.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
|
||||
)
|
||||
expiry_data = tls_certificate.get_notAfter()
|
||||
if expiry_data is None:
|
||||
raise ValueError(
|
||||
"TLS Certificate has no expiry date, and this is not permitted"
|
||||
)
|
||||
expires_on = datetime.strptime(expiry_data.decode("ascii"), "%Y%m%d%H%M%SZ")
|
||||
now = datetime.utcnow()
|
||||
days_remaining = (expires_on - now).days
|
||||
return days_remaining
|
||||
|
||||
@@ -29,9 +29,12 @@ from twisted.internet.ssl import (
|
||||
TLSVersion,
|
||||
platformTrust,
|
||||
)
|
||||
from twisted.protocols.tls import TLSMemoryBIOProtocol
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.web.iweb import IPolicyForHTTPS
|
||||
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -51,7 +54,7 @@ class ServerContextFactory(ContextFactory):
|
||||
per https://github.com/matrix-org/synapse/issues/1691
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self, config: HomeServerConfig):
|
||||
# TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version,
|
||||
# switch to those (see https://github.com/pyca/cryptography/issues/5379).
|
||||
#
|
||||
@@ -64,7 +67,7 @@ class ServerContextFactory(ContextFactory):
|
||||
self.configure_context(self._context, config)
|
||||
|
||||
@staticmethod
|
||||
def configure_context(context, config):
|
||||
def configure_context(context: SSL.Context, config: HomeServerConfig) -> None:
|
||||
try:
|
||||
_ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
|
||||
context.set_tmp_ecdh(_ecCurve)
|
||||
@@ -75,14 +78,15 @@ class ServerContextFactory(ContextFactory):
|
||||
SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
|
||||
)
|
||||
context.use_certificate_chain_file(config.tls.tls_certificate_file)
|
||||
assert config.tls.tls_private_key is not None
|
||||
context.use_privatekey(config.tls.tls_private_key)
|
||||
|
||||
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
|
||||
context.set_cipher_list(
|
||||
"ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
|
||||
b"ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM"
|
||||
)
|
||||
|
||||
def getContext(self):
|
||||
def getContext(self) -> SSL.Context:
|
||||
return self._context
|
||||
|
||||
|
||||
@@ -98,7 +102,7 @@ class FederationPolicyForHTTPS:
|
||||
constructs an SSLClientConnectionCreator factory accordingly.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self, config: HomeServerConfig):
|
||||
self._config = config
|
||||
|
||||
# Check if we're using a custom list of a CA certificates
|
||||
@@ -131,7 +135,7 @@ class FederationPolicyForHTTPS:
|
||||
self._config.tls.federation_certificate_verification_whitelist
|
||||
)
|
||||
|
||||
def get_options(self, host: bytes):
|
||||
def get_options(self, host: bytes) -> IOpenSSLClientConnectionCreator:
|
||||
# IPolicyForHTTPS.get_options takes bytes, but we want to compare
|
||||
# against the str whitelist. The hostnames in the whitelist are already
|
||||
# IDNA-encoded like the hosts will be here.
|
||||
@@ -153,7 +157,9 @@ class FederationPolicyForHTTPS:
|
||||
|
||||
return SSLClientConnectionCreator(host, ssl_context, should_verify)
|
||||
|
||||
def creatorForNetloc(self, hostname, port):
|
||||
def creatorForNetloc(
|
||||
self, hostname: bytes, port: int
|
||||
) -> IOpenSSLClientConnectionCreator:
|
||||
"""Implements the IPolicyForHTTPS interface so that this can be passed
|
||||
directly to agents.
|
||||
"""
|
||||
@@ -169,16 +175,18 @@ class RegularPolicyForHTTPS:
|
||||
trust root.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self) -> None:
|
||||
trust_root = platformTrust()
|
||||
self._ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
|
||||
self._ssl_context.set_info_callback(_context_info_cb)
|
||||
|
||||
def creatorForNetloc(self, hostname, port):
|
||||
def creatorForNetloc(
|
||||
self, hostname: bytes, port: int
|
||||
) -> IOpenSSLClientConnectionCreator:
|
||||
return SSLClientConnectionCreator(hostname, self._ssl_context, True)
|
||||
|
||||
|
||||
def _context_info_cb(ssl_connection, where, ret):
|
||||
def _context_info_cb(ssl_connection: SSL.Connection, where: int, ret: int) -> None:
|
||||
"""The 'information callback' for our openssl context objects.
|
||||
|
||||
Note: Once this is set as the info callback on a Context object, the Context should
|
||||
@@ -204,11 +212,13 @@ class SSLClientConnectionCreator:
|
||||
Replaces twisted.internet.ssl.ClientTLSOptions
|
||||
"""
|
||||
|
||||
def __init__(self, hostname: bytes, ctx, verify_certs: bool):
|
||||
def __init__(self, hostname: bytes, ctx: SSL.Context, verify_certs: bool):
|
||||
self._ctx = ctx
|
||||
self._verifier = ConnectionVerifier(hostname, verify_certs)
|
||||
|
||||
def clientConnectionForTLS(self, tls_protocol):
|
||||
def clientConnectionForTLS(
|
||||
self, tls_protocol: TLSMemoryBIOProtocol
|
||||
) -> SSL.Connection:
|
||||
context = self._ctx
|
||||
connection = SSL.Connection(context, None)
|
||||
|
||||
@@ -219,7 +229,7 @@ class SSLClientConnectionCreator:
|
||||
# ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
|
||||
# tls_protocol so that the SSL context's info callback has something to
|
||||
# call to do the cert verification.
|
||||
tls_protocol._synapse_tls_verifier = self._verifier
|
||||
tls_protocol._synapse_tls_verifier = self._verifier # type: ignore[attr-defined]
|
||||
return connection
|
||||
|
||||
|
||||
@@ -244,7 +254,9 @@ class ConnectionVerifier:
|
||||
self._hostnameBytes = hostname
|
||||
self._hostnameASCII = self._hostnameBytes.decode("ascii")
|
||||
|
||||
def verify_context_info_cb(self, ssl_connection, where):
|
||||
def verify_context_info_cb(
|
||||
self, ssl_connection: SSL.Connection, where: int
|
||||
) -> None:
|
||||
if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
|
||||
ssl_connection.set_tlsext_host_name(self._hostnameBytes)
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ def compute_content_hash(
|
||||
|
||||
|
||||
def compute_event_reference_hash(
|
||||
event, hash_algorithm: Hasher = hashlib.sha256
|
||||
event: EventBase, hash_algorithm: Hasher = hashlib.sha256
|
||||
) -> Tuple[str, bytes]:
|
||||
"""Computes the event reference hash. This is the hash of the redacted
|
||||
event.
|
||||
|
||||
@@ -87,7 +87,7 @@ class VerifyJsonRequest:
|
||||
server_name: str,
|
||||
json_object: JsonDict,
|
||||
minimum_valid_until_ms: int,
|
||||
):
|
||||
) -> "VerifyJsonRequest":
|
||||
"""Create a VerifyJsonRequest to verify all signatures on a signed JSON
|
||||
object for the given server.
|
||||
"""
|
||||
@@ -104,7 +104,7 @@ class VerifyJsonRequest:
|
||||
server_name: str,
|
||||
event: EventBase,
|
||||
minimum_valid_until_ms: int,
|
||||
):
|
||||
) -> "VerifyJsonRequest":
|
||||
"""Create a VerifyJsonRequest to verify all signatures on an event
|
||||
object for the given server.
|
||||
"""
|
||||
@@ -449,7 +449,9 @@ class StoreKeyFetcher(KeyFetcher):
|
||||
|
||||
self.store = hs.get_datastore()
|
||||
|
||||
async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]):
|
||||
async def _fetch_keys(
|
||||
self, keys_to_fetch: List[_FetchKeyRequest]
|
||||
) -> Dict[str, Dict[str, FetchKeyResult]]:
|
||||
key_ids_to_fetch = (
|
||||
(queue_value.server_name, key_id)
|
||||
for queue_value in keys_to_fetch
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
@@ -41,43 +41,111 @@ from synapse.types import StateMap, UserID, get_domain_from_id
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check(
|
||||
room_version_obj: RoomVersion,
|
||||
event: EventBase,
|
||||
auth_events: StateMap[EventBase],
|
||||
do_sig_check: bool = True,
|
||||
do_size_check: bool = True,
|
||||
def validate_event_for_room_version(
|
||||
room_version_obj: RoomVersion, event: EventBase
|
||||
) -> None:
|
||||
"""Checks if this event is correctly authed.
|
||||
"""Ensure that the event complies with the limits, and has the right signatures
|
||||
|
||||
NB: does not *validate* the signatures - it assumes that any signatures present
|
||||
have already been checked.
|
||||
|
||||
NB: it does not check that the event satisfies the auth rules (that is done in
|
||||
check_auth_rules_for_event) - these tests are independent of the rest of the state
|
||||
in the room.
|
||||
|
||||
NB: This is used to check events that have been received over federation. As such,
|
||||
it can only enforce the checks specified in the relevant room version, to avoid
|
||||
a split-brain situation where some servers accept such events, and others reject
|
||||
them.
|
||||
|
||||
TODO: consider moving this into EventValidator
|
||||
|
||||
Args:
|
||||
room_version_obj: the version of the room
|
||||
event: the event being checked.
|
||||
auth_events: the existing room state.
|
||||
do_sig_check: True if it should be verified that the sending server
|
||||
signed the event.
|
||||
do_size_check: True if the size of the event fields should be verified.
|
||||
room_version_obj: the version of the room which contains this event
|
||||
event: the event to be checked
|
||||
|
||||
Raises:
|
||||
AuthError if the checks fail
|
||||
|
||||
Returns:
|
||||
if the auth checks pass.
|
||||
SynapseError if there is a problem with the event
|
||||
"""
|
||||
assert isinstance(auth_events, dict)
|
||||
|
||||
if do_size_check:
|
||||
_check_size_limits(event)
|
||||
_check_size_limits(event)
|
||||
|
||||
if not hasattr(event, "room_id"):
|
||||
raise AuthError(500, "Event has no room_id: %s" % event)
|
||||
|
||||
room_id = event.room_id
|
||||
# check that the event has the correct signatures
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
|
||||
is_invite_via_3pid = (
|
||||
event.type == EventTypes.Member
|
||||
and event.membership == Membership.INVITE
|
||||
and "third_party_invite" in event.content
|
||||
)
|
||||
|
||||
# Check the sender's domain has signed the event
|
||||
if not event.signatures.get(sender_domain):
|
||||
# We allow invites via 3pid to have a sender from a different
|
||||
# HS, as the sender must match the sender of the original
|
||||
# 3pid invite. This is checked further down with the
|
||||
# other dedicated membership checks.
|
||||
if not is_invite_via_3pid:
|
||||
raise AuthError(403, "Event not signed by sender's server")
|
||||
|
||||
if event.format_version in (EventFormatVersions.V1,):
|
||||
# Only older room versions have event IDs to check.
|
||||
event_id_domain = get_domain_from_id(event.event_id)
|
||||
|
||||
# Check the origin domain has signed the event
|
||||
if not event.signatures.get(event_id_domain):
|
||||
raise AuthError(403, "Event not signed by sending server")
|
||||
|
||||
is_invite_via_allow_rule = (
|
||||
room_version_obj.msc3083_join_rules
|
||||
and event.type == EventTypes.Member
|
||||
and event.membership == Membership.JOIN
|
||||
and EventContentFields.AUTHORISING_USER in event.content
|
||||
)
|
||||
if is_invite_via_allow_rule:
|
||||
authoriser_domain = get_domain_from_id(
|
||||
event.content[EventContentFields.AUTHORISING_USER]
|
||||
)
|
||||
if not event.signatures.get(authoriser_domain):
|
||||
raise AuthError(403, "Event not signed by authorising server")
|
||||
|
||||
|
||||
def check_auth_rules_for_event(
|
||||
room_version_obj: RoomVersion, event: EventBase, auth_events: Iterable[EventBase]
|
||||
) -> None:
|
||||
"""Check that an event complies with the auth rules
|
||||
|
||||
Checks whether an event passes the auth rules with a given set of state events
|
||||
|
||||
Assumes that we have already checked that the event is the right shape (it has
|
||||
enough signatures, has a room ID, etc). In other words:
|
||||
|
||||
- it's fine for use in state resolution, when we have already decided whether to
|
||||
accept the event or not, and are now trying to decide whether it should make it
|
||||
into the room state
|
||||
|
||||
- when we're doing the initial event auth, it is only suitable in combination with
|
||||
a bunch of other tests.
|
||||
|
||||
Args:
|
||||
room_version_obj: the version of the room
|
||||
event: the event being checked.
|
||||
auth_events: the room state to check the events against.
|
||||
|
||||
Raises:
|
||||
AuthError if the checks fail
|
||||
"""
|
||||
# We need to ensure that the auth events are actually for the same room, to
|
||||
# stop people from using powers they've been granted in other rooms for
|
||||
# example.
|
||||
for auth_event in auth_events.values():
|
||||
#
|
||||
# Arguably we don't need to do this when we're just doing state res, as presumably
|
||||
# the state res algorithm isn't silly enough to give us events from different rooms.
|
||||
# Still, it's easier to do it anyway.
|
||||
room_id = event.room_id
|
||||
for auth_event in auth_events:
|
||||
if auth_event.room_id != room_id:
|
||||
raise AuthError(
|
||||
403,
|
||||
@@ -85,44 +153,12 @@ def check(
|
||||
"which is in room %s"
|
||||
% (event.event_id, room_id, auth_event.event_id, auth_event.room_id),
|
||||
)
|
||||
|
||||
if do_sig_check:
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
|
||||
is_invite_via_3pid = (
|
||||
event.type == EventTypes.Member
|
||||
and event.membership == Membership.INVITE
|
||||
and "third_party_invite" in event.content
|
||||
)
|
||||
|
||||
# Check the sender's domain has signed the event
|
||||
if not event.signatures.get(sender_domain):
|
||||
# We allow invites via 3pid to have a sender from a different
|
||||
# HS, as the sender must match the sender of the original
|
||||
# 3pid invite. This is checked further down with the
|
||||
# other dedicated membership checks.
|
||||
if not is_invite_via_3pid:
|
||||
raise AuthError(403, "Event not signed by sender's server")
|
||||
|
||||
if event.format_version in (EventFormatVersions.V1,):
|
||||
# Only older room versions have event IDs to check.
|
||||
event_id_domain = get_domain_from_id(event.event_id)
|
||||
|
||||
# Check the origin domain has signed the event
|
||||
if not event.signatures.get(event_id_domain):
|
||||
raise AuthError(403, "Event not signed by sending server")
|
||||
|
||||
is_invite_via_allow_rule = (
|
||||
event.type == EventTypes.Member
|
||||
and event.membership == Membership.JOIN
|
||||
and EventContentFields.AUTHORISING_USER in event.content
|
||||
)
|
||||
if is_invite_via_allow_rule:
|
||||
authoriser_domain = get_domain_from_id(
|
||||
event.content[EventContentFields.AUTHORISING_USER]
|
||||
if auth_event.rejected_reason:
|
||||
raise AuthError(
|
||||
403,
|
||||
"During auth for event %s: found rejected event %s in the state"
|
||||
% (event.event_id, auth_event.event_id),
|
||||
)
|
||||
if not event.signatures.get(authoriser_domain):
|
||||
raise AuthError(403, "Event not signed by authorising server")
|
||||
|
||||
# Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
|
||||
#
|
||||
@@ -148,8 +184,10 @@ def check(
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
auth_dict = {(e.type, e.state_key): e for e in auth_events}
|
||||
|
||||
# 3. If event does not have a m.room.create in its auth_events, reject.
|
||||
creation_event = auth_events.get((EventTypes.Create, ""), None)
|
||||
creation_event = auth_dict.get((EventTypes.Create, ""), None)
|
||||
if not creation_event:
|
||||
raise AuthError(403, "No create event in auth events")
|
||||
|
||||
@@ -157,7 +195,7 @@ def check(
|
||||
creating_domain = get_domain_from_id(event.room_id)
|
||||
originating_domain = get_domain_from_id(event.sender)
|
||||
if creating_domain != originating_domain:
|
||||
if not _can_federate(event, auth_events):
|
||||
if not _can_federate(event, auth_dict):
|
||||
raise AuthError(403, "This room has been marked as unfederatable.")
|
||||
|
||||
# 4. If type is m.room.aliases
|
||||
@@ -179,23 +217,20 @@ def check(
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
|
||||
|
||||
# 5. If type is m.room.membership
|
||||
if event.type == EventTypes.Member:
|
||||
_is_membership_change_allowed(room_version_obj, event, auth_events)
|
||||
_is_membership_change_allowed(room_version_obj, event, auth_dict)
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
_check_event_sender_in_room(event, auth_events)
|
||||
_check_event_sender_in_room(event, auth_dict)
|
||||
|
||||
# Special case to allow m.room.third_party_invite events wherever
|
||||
# a user is allowed to issue invites. Fixes
|
||||
# https://github.com/vector-im/vector-web/issues/1208 hopefully
|
||||
if event.type == EventTypes.ThirdPartyInvite:
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
invite_level = get_named_level(auth_events, "invite", 0)
|
||||
user_level = get_user_power_level(event.user_id, auth_dict)
|
||||
invite_level = get_named_level(auth_dict, "invite", 0)
|
||||
|
||||
if user_level < invite_level:
|
||||
raise AuthError(403, "You don't have permission to invite users")
|
||||
@@ -203,20 +238,20 @@ def check(
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
_can_send_event(event, auth_events)
|
||||
_can_send_event(event, auth_dict)
|
||||
|
||||
if event.type == EventTypes.PowerLevels:
|
||||
_check_power_levels(room_version_obj, event, auth_events)
|
||||
_check_power_levels(room_version_obj, event, auth_dict)
|
||||
|
||||
if event.type == EventTypes.Redaction:
|
||||
check_redaction(room_version_obj, event, auth_events)
|
||||
check_redaction(room_version_obj, event, auth_dict)
|
||||
|
||||
if (
|
||||
event.type == EventTypes.MSC2716_INSERTION
|
||||
or event.type == EventTypes.MSC2716_BATCH
|
||||
or event.type == EventTypes.MSC2716_MARKER
|
||||
):
|
||||
check_historical(room_version_obj, event, auth_events)
|
||||
check_historical(room_version_obj, event, auth_dict)
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
|
||||
|
||||
@@ -348,12 +348,16 @@ class EventBase(metaclass=abc.ABCMeta):
|
||||
return self.__repr__()
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
|
||||
self.__class__.__name__,
|
||||
self.event_id,
|
||||
self.get("type", None),
|
||||
self.get("state_key", None),
|
||||
self.internal_metadata.is_outlier(),
|
||||
rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
|
||||
|
||||
return (
|
||||
f"<{self.__class__.__name__} "
|
||||
f"{rejection}"
|
||||
f"event_id={self.event_id}, "
|
||||
f"type={self.get('type')}, "
|
||||
f"state_key={self.get('state_key')}, "
|
||||
f"outlier={self.internal_metadata.is_outlier()}"
|
||||
">"
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -18,10 +18,8 @@ import attr
|
||||
from nacl.signing import SigningKey
|
||||
|
||||
from synapse.api.constants import MAX_DEPTH
|
||||
from synapse.api.errors import UnsupportedRoomVersionError
|
||||
from synapse.api.room_versions import (
|
||||
KNOWN_EVENT_FORMAT_VERSIONS,
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
EventFormatVersions,
|
||||
RoomVersion,
|
||||
)
|
||||
@@ -92,13 +90,13 @@ class EventBuilder:
|
||||
)
|
||||
|
||||
@property
|
||||
def state_key(self):
|
||||
def state_key(self) -> str:
|
||||
if self._state_key is not None:
|
||||
return self._state_key
|
||||
|
||||
raise AttributeError("state_key")
|
||||
|
||||
def is_state(self):
|
||||
def is_state(self) -> bool:
|
||||
return self._state_key is not None
|
||||
|
||||
async def build(
|
||||
@@ -197,24 +195,6 @@ class EventBuilderFactory:
|
||||
self.state = hs.get_state_handler()
|
||||
self._event_auth_handler = hs.get_event_auth_handler()
|
||||
|
||||
def new(self, room_version: str, key_values: dict) -> EventBuilder:
|
||||
"""Generate an event builder appropriate for the given room version
|
||||
|
||||
Deprecated: use for_room_version with a RoomVersion object instead
|
||||
|
||||
Args:
|
||||
room_version: Version of the room that we're creating an event builder for
|
||||
key_values: Fields used as the basis of the new event
|
||||
|
||||
Returns:
|
||||
EventBuilder
|
||||
"""
|
||||
v = KNOWN_ROOM_VERSIONS.get(room_version)
|
||||
if not v:
|
||||
# this can happen if support is withdrawn for a room version
|
||||
raise UnsupportedRoomVersionError()
|
||||
return self.for_room_version(v, key_values)
|
||||
|
||||
def for_room_version(
|
||||
self, room_version: RoomVersion, key_values: dict
|
||||
) -> EventBuilder:
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Dict,
|
||||
@@ -33,23 +34,22 @@ if TYPE_CHECKING:
|
||||
GET_USERS_FOR_STATES_CALLBACK = Callable[
|
||||
[Iterable[UserPresenceState]], Awaitable[Dict[str, Set[UserPresenceState]]]
|
||||
]
|
||||
GET_INTERESTED_USERS_CALLBACK = Callable[
|
||||
[str], Awaitable[Union[Set[str], "PresenceRouter.ALL_USERS"]]
|
||||
]
|
||||
# This must either return a set of strings or the constant PresenceRouter.ALL_USERS.
|
||||
GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def load_legacy_presence_router(hs: "HomeServer"):
|
||||
def load_legacy_presence_router(hs: "HomeServer") -> None:
|
||||
"""Wrapper that loads a presence router module configured using the old
|
||||
configuration, and registers the hooks they implement.
|
||||
"""
|
||||
|
||||
if hs.config.presence_router_module_class is None:
|
||||
if hs.config.server.presence_router_module_class is None:
|
||||
return
|
||||
|
||||
module = hs.config.presence_router_module_class
|
||||
config = hs.config.presence_router_config
|
||||
module = hs.config.server.presence_router_module_class
|
||||
config = hs.config.server.presence_router_config
|
||||
api = hs.get_module_api()
|
||||
|
||||
presence_router = module(config=config, module_api=api)
|
||||
@@ -69,9 +69,10 @@ def load_legacy_presence_router(hs: "HomeServer"):
|
||||
if f is None:
|
||||
return None
|
||||
|
||||
def run(*args, **kwargs):
|
||||
# mypy doesn't do well across function boundaries so we need to tell it
|
||||
# f is definitely not None.
|
||||
def run(*args: Any, **kwargs: Any) -> Awaitable:
|
||||
# Assertion required because mypy can't prove we won't change `f`
|
||||
# back to `None`. See
|
||||
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
|
||||
assert f is not None
|
||||
|
||||
return maybe_awaitable(f(*args, **kwargs))
|
||||
@@ -104,7 +105,7 @@ class PresenceRouter:
|
||||
self,
|
||||
get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None,
|
||||
get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None,
|
||||
):
|
||||
) -> None:
|
||||
# PresenceRouter modules are required to implement both of these methods
|
||||
# or neither of them as they are assumed to act in a complementary manner
|
||||
paired_methods = [get_users_for_states, get_interested_users]
|
||||
@@ -142,7 +143,7 @@ class PresenceRouter:
|
||||
# Don't include any extra destinations for presence updates
|
||||
return {}
|
||||
|
||||
users_for_states = {}
|
||||
users_for_states: Dict[str, Set[UserPresenceState]] = {}
|
||||
# run all the callbacks for get_users_for_states and combine the results
|
||||
for callback in self._get_users_for_states_callbacks:
|
||||
try:
|
||||
@@ -171,7 +172,7 @@ class PresenceRouter:
|
||||
|
||||
return users_for_states
|
||||
|
||||
async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]:
|
||||
async def get_interested_users(self, user_id: str) -> Union[Set[str], str]:
|
||||
"""
|
||||
Retrieve a list of users that `user_id` is interested in receiving the
|
||||
presence of. This will be in addition to those they share a room with.
@@ -11,17 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional, Union
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import attr
from frozendict import frozendict

from twisted.internet.defer import Deferred

from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import StateMap
from synapse.types import JsonDict, StateMap

if TYPE_CHECKING:
    from synapse.storage import Storage
    from synapse.storage.databases.main import DataStore


@@ -112,13 +115,13 @@ class EventContext:

    @staticmethod
    def with_state(
        state_group,
        state_group_before_event,
        current_state_ids,
        prev_state_ids,
        prev_group=None,
        delta_ids=None,
    ):
        state_group: Optional[int],
        state_group_before_event: Optional[int],
        current_state_ids: Optional[StateMap[str]],
        prev_state_ids: Optional[StateMap[str]],
        prev_group: Optional[int] = None,
        delta_ids: Optional[StateMap[str]] = None,
    ) -> "EventContext":
        return EventContext(
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
@@ -129,22 +132,22 @@ class EventContext:
        )

    @staticmethod
    def for_outlier():
    def for_outlier() -> "EventContext":
        """Return an EventContext instance suitable for persisting an outlier event"""
        return EventContext(
            current_state_ids={},
            prev_state_ids={},
        )

    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
    async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
        """Converts self to a type that can be serialized as JSON, and then
        deserialized by `deserialize`

        Args:
            event (FrozenEvent): The event that this context relates to
            event: The event that this context relates to

        Returns:
            dict
            The serialized event.
        """

        # We don't serialize the full state dicts, instead they get pulled out
@@ -170,17 +173,16 @@ class EventContext:
        }

    @staticmethod
    def deserialize(storage, input):
    def deserialize(storage: "Storage", input: JsonDict) -> "EventContext":
        """Converts a dict that was produced by `serialize` back into a
        EventContext.

        Args:
            storage (Storage): Used to convert AS ID to AS object and fetch
                state.
            input (dict): A dict produced by `serialize`
            storage: Used to convert AS ID to AS object and fetch state.
            input: A dict produced by `serialize`

        Returns:
            EventContext
            The event context.
        """
        context = _AsyncEventContextImpl(
            # We use the state_group and prev_state_id stuff to pull the
@@ -241,22 +243,25 @@ class EventContext:
        await self._ensure_fetched()
        return self._current_state_ids

    async def get_prev_state_ids(self):
    async def get_prev_state_ids(self) -> StateMap[str]:
        """
        Gets the room state map, excluding this event.

        For a non-state event, this will be the same as get_current_state_ids().

        Returns:
            dict[(str, str), str]|None: Returns None if state_group
                is None, which happens when the associated event is an outlier.
            Maps a (type, state_key) to the event ID of the state event matching
            this tuple.
            Returns {} if state_group is None, which happens when the associated
            event is an outlier.

            Maps a (type, state_key) to the event ID of the state event matching
            this tuple.
        """
        await self._ensure_fetched()
        # There *should* be previous state IDs now.
        assert self._prev_state_ids is not None
        return self._prev_state_ids
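
For concreteness, the "(type, state_key) -> event ID" mapping described in the reworked docstring has this shape (the user and event IDs are invented):

    # Illustrative only: a StateMap[str] as returned by get_prev_state_ids.
    prev_state_ids = {
        ("m.room.create", ""): "$creation-event-id",
        ("m.room.member", "@alice:example.org"): "$alice-join-event-id",
    }
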

    def get_cached_current_state_ids(self):
    def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
        """Gets the current state IDs if we have them already cached.

        It is an error to access this for a rejected event, since rejected state should
@@ -264,16 +269,17 @@ class EventContext:
        ``rejected`` is set.

        Returns:
            dict[(str, str), str]|None: Returns None if we haven't cached the
                state or if state_group is None, which happens when the associated
                event is an outlier.
            Returns None if we haven't cached the state or if state_group is None
            (which happens when the associated event is an outlier).

            Otherwise, returns the current state IDs.
        """
        if self.rejected:
            raise RuntimeError("Attempt to access state_ids of rejected event")

        return self._current_state_ids

    async def _ensure_fetched(self):
    async def _ensure_fetched(self) -> None:
        return None


@@ -285,46 +291,46 @@ class _AsyncEventContextImpl(EventContext):

    Attributes:

        _storage (Storage)
        _storage

        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
            been calculated. None if we haven't started calculating yet
        _fetching_state_deferred: Resolves when *_state_ids have been calculated.
            None if we haven't started calculating yet

        _event_type (str): The type of the event the context is associated with.
        _event_type: The type of the event the context is associated with.

        _event_state_key (str): The state_key of the event the context is
            associated with.
        _event_state_key: The state_key of the event the context is associated with.

        _prev_state_id (str|None): If the event associated with the context is
            a state event, then `_prev_state_id` is the event_id of the state
            that was replaced.
        _prev_state_id: If the event associated with the context is a state event,
            then `_prev_state_id` is the event_id of the state that was replaced.
    """

    # This needs to have a default as we're inheriting
    _storage = attr.ib(default=None)
    _prev_state_id = attr.ib(default=None)
    _event_type = attr.ib(default=None)
    _event_state_key = attr.ib(default=None)
    _fetching_state_deferred = attr.ib(default=None)
    _storage: "Storage" = attr.ib(default=None)
    _prev_state_id: Optional[str] = attr.ib(default=None)
    _event_type: str = attr.ib(default=None)
    _event_state_key: Optional[str] = attr.ib(default=None)
    _fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)

    async def _ensure_fetched(self):
    async def _ensure_fetched(self) -> None:
        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(self._fill_out_state)

        return await make_deferred_yieldable(self._fetching_state_deferred)
        await make_deferred_yieldable(self._fetching_state_deferred)

    async def _fill_out_state(self):
    async def _fill_out_state(self) -> None:
        """Called to populate the _current_state_ids and _prev_state_ids
        attributes by loading from the database.
        """
        if self.state_group is None:
            return

        self._current_state_ids = await self._storage.state.get_state_ids_for_group(
        current_state_ids = await self._storage.state.get_state_ids_for_group(
            self.state_group
        )
        # Set this separately so mypy knows current_state_ids is not None.
        self._current_state_ids = current_state_ids
        if self._event_state_key is not None:
            self._prev_state_ids = dict(self._current_state_ids)
            self._prev_state_ids = dict(current_state_ids)

            key = (self._event_type, self._event_state_key)
            if self._prev_state_id:
@@ -332,10 +338,12 @@ class _AsyncEventContextImpl(EventContext):
            else:
                self._prev_state_ids.pop(key, None)
        else:
            self._prev_state_ids = self._current_state_ids
            self._prev_state_ids = current_state_ids


def _encode_state_dict(state_dict):
def _encode_state_dict(
    state_dict: Optional[StateMap[str]],
) -> Optional[List[Tuple[str, str, str]]]:
    """Since dicts of (type, state_key) -> event_id cannot be serialized in
    JSON we need to convert them to a form that can.
    """
@@ -345,7 +353,9 @@ def _encode_state_dict(state_dict):
    return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]


def _decode_state_dict(input):
def _decode_state_dict(
    input: Optional[List[Tuple[str, str, str]]]
) -> Optional[StateMap[str]]:
    """Decodes a state dict encoded using `_encode_state_dict` above"""
    if input is None:
        return None
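
A small sketch of the round-trip the two helpers above perform, with invented contents:

    # (type, state_key) tuple keys are not valid JSON object keys, so the dict
    # is flattened into a list of (type, state_key, event_id) triples.
    state = {("m.room.member", "@alice:example.org"): "$event-id"}
    encoded = _encode_state_dict(state)
    # encoded == [("m.room.member", "@alice:example.org", "$event-id")]
    decoded = _decode_state_dict(encoded)
    # decoded maps ("m.room.member", "@alice:example.org") back to "$event-id";
    # both helpers pass None through unchanged.
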

@@ -44,7 +44,9 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
    ["synapse.events.EventBase"],
    Awaitable[Union[bool, str]],
]
USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]
USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
    [str, List[str], List[Dict[str, str]]], Awaitable[bool]
@@ -75,7 +77,7 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
]


def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
    """Wrapper that loads spam checkers configured using the old configuration, and
    registers the spam checker hooks they implement.
    """
@@ -127,9 +129,9 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
                request_info: Collection[Tuple[str, str]],
                auth_provider_id: Optional[str],
            ) -> Union[Awaitable[RegistrationBehaviour], RegistrationBehaviour]:
                # We've already made sure f is not None above, but mypy doesn't
                # do well across function boundaries so we need to tell it f is
                # definitely not None.
                # Assertion required because mypy can't prove we won't
                # change `f` back to `None`. See
                # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
                assert f is not None

                return f(
@@ -144,9 +146,10 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
                "Bad signature for callback check_registration_for_spam",
            )

        def run(*args, **kwargs):
            # mypy doesn't do well across function boundaries so we need to tell it
            # wrapped_func is definitely not None.
        def run(*args: Any, **kwargs: Any) -> Awaitable:
            # Assertion required because mypy can't prove we won't change `f`
            # back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert wrapped_func is not None

            return maybe_awaitable(wrapped_func(*args, **kwargs))
@@ -163,9 +166,13 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):


class SpamChecker:
    def __init__(self):
    def __init__(self) -> None:
        self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
        self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
        self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
        self._user_may_send_3pid_invite_callbacks: List[
            USER_MAY_SEND_3PID_INVITE_CALLBACK
        ] = []
        self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
        self._user_may_create_room_with_invites_callbacks: List[
            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
@@ -187,7 +194,9 @@ class SpamChecker:
    def register_callbacks(
        self,
        check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
        user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
        user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
        user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
        user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
        user_may_create_room_with_invites: Optional[
            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
@@ -201,14 +210,22 @@ class SpamChecker:
            CHECK_REGISTRATION_FOR_SPAM_CALLBACK
        ] = None,
        check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
    ):
    ) -> None:
        """Register callbacks from module for each hook."""
        if check_event_for_spam is not None:
            self._check_event_for_spam_callbacks.append(check_event_for_spam)

        if user_may_join_room is not None:
            self._user_may_join_room_callbacks.append(user_may_join_room)

        if user_may_invite is not None:
            self._user_may_invite_callbacks.append(user_may_invite)

        if user_may_send_3pid_invite is not None:
            self._user_may_send_3pid_invite_callbacks.append(
                user_may_send_3pid_invite,
            )

        if user_may_create_room is not None:
            self._user_may_create_room_callbacks.append(user_may_create_room)

@@ -259,6 +276,26 @@ class SpamChecker:

        return False

    async def user_may_join_room(
        self, user_id: str, room_id: str, is_invited: bool
    ) -> bool:
        """Checks if a given user is allowed to join a room.
        Not called when a user creates a room.

        Args:
            user_id: The ID of the user wanting to join the room
            room_id: The ID of the room the user wants to join
            is_invited: Whether the user is invited into the room

        Returns:
            Whether the user may join the room
        """
        for callback in self._user_may_join_room_callbacks:
            if await callback(user_id, room_id, is_invited) is False:
                return False

        return True
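
A hedged sketch of how a module would feed the callback list used above; the class, domain, and policy are hypothetical, and registration is assumed to go through the module API's register_spam_checker_callbacks:

    class ExampleSpamChecker:
        def __init__(self, config, api):
            # Register only the hook this module implements.
            api.register_spam_checker_callbacks(user_may_join_room=self.user_may_join_room)

        async def user_may_join_room(
            self, user_id: str, room_id: str, is_invited: bool
        ) -> bool:
            # Invented policy: allow invited users, otherwise only local users.
            return is_invited or user_id.endswith(":example.org")
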

    async def user_may_invite(
        self, inviter_userid: str, invitee_userid: str, room_id: str
    ) -> bool:
@@ -280,6 +317,31 @@ class SpamChecker:

        return True

    async def user_may_send_3pid_invite(
        self, inviter_userid: str, medium: str, address: str, room_id: str
    ) -> bool:
        """Checks if a given user may invite a given threepid into the room

        If this method returns false, the threepid invite will be rejected.

        Note that if the threepid is already associated with a Matrix user ID, Synapse
        will call user_may_invite with said user ID instead.

        Args:
            inviter_userid: The user ID of the sender of the invitation
            medium: The 3PID's medium (e.g. "email")
            address: The 3PID's address (e.g. "alice@example.com")
            room_id: The room ID

        Returns:
            True if the user may send the invite, otherwise False
        """
        for callback in self._user_may_send_3pid_invite_callbacks:
            if await callback(inviter_userid, medium, address, room_id) is False:
                return False

        return True
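
Likewise, a hypothetical USER_MAY_SEND_3PID_INVITE_CALLBACK matching the four-argument signature above:

    async def user_may_send_3pid_invite(
        inviter_userid: str, medium: str, address: str, room_id: str
    ) -> bool:
        # Invented policy: only allow email invites to one corporate domain.
        if medium == "email":
            return address.endswith("@example.com")
        return True
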

    async def user_may_create_room(self, userid: str) -> bool:
        """Checks if a given user may create a room


@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

from synapse.api.errors import SynapseError
from synapse.events import EventBase
@@ -38,7 +38,7 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
]


def load_legacy_third_party_event_rules(hs: "HomeServer"):
def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
    """Wrapper that loads a third party event rules module configured using the old
    configuration, and registers the hooks they implement.
    """
@@ -77,9 +77,9 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
            event: EventBase,
            state_events: StateMap[EventBase],
        ) -> Tuple[bool, Optional[dict]]:
            # We've already made sure f is not None above, but mypy doesn't do well
            # across function boundaries so we need to tell it f is definitely not
            # None.
            # Assertion required because mypy can't prove we won't change
            # `f` back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

            res = await f(event, state_events)
@@ -98,9 +98,9 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
        async def wrap_on_create_room(
            requester: Requester, config: dict, is_requester_admin: bool
        ) -> None:
            # We've already made sure f is not None above, but mypy doesn't do well
            # across function boundaries so we need to tell it f is definitely not
            # None.
            # Assertion required because mypy can't prove we won't change
            # `f` back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

            res = await f(requester, config, is_requester_admin)
@@ -112,9 +112,10 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):

        return wrap_on_create_room

        def run(*args, **kwargs):
            # mypy doesn't do well across function boundaries so we need to tell it
            # f is definitely not None.
        def run(*args: Any, **kwargs: Any) -> Awaitable:
            # Assertion required because mypy can't prove we won't change `f`
            # back to `None`. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            assert f is not None

            return maybe_awaitable(f(*args, **kwargs))
@@ -162,7 +163,7 @@ class ThirdPartyEventRules:
        check_visibility_can_be_modified: Optional[
            CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
        ] = None,
    ):
    ) -> None:
        """Register callbacks from modules for each hook."""
        if check_event_allowed is not None:
            self._check_event_allowed_callbacks.append(check_event_allowed)
@@ -217,6 +218,15 @@ class ThirdPartyEventRules:
        for callback in self._check_event_allowed_callbacks:
            try:
                res, replacement_data = await callback(event, state_events)
            except SynapseError as e:
                # FIXME: Being able to throw SynapseErrors is relied upon by
                # some modules. PR #10386 accidentally broke this ability.
                # That said, we aren't keen on exposing this implementation detail
                # to modules and we should one day have a proper way to do what
                # is wanted.
                # This module callback needs a rework so that hacks such as
                # this one are not necessary.
                raise e
            except Exception as e:
                logger.warning("Failed to run module API callback %s: %s", callback, e)
                continue
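
The re-raised SynapseError above restores behaviour some modules rely on; a sketch of a callback using it (the policy and error message are invented):

    from synapse.api.errors import SynapseError

    async def check_event_allowed(event, state_events):
        # Raising SynapseError propagates to the client, unlike other
        # exceptions, which are logged as a warning and skipped.
        if "forbidden" in event.content.get("body", ""):
            raise SynapseError(403, "Message rejected by policy")
        return True, None  # (allowed, no replacement event data)
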

@@ -13,18 +13,32 @@
# limitations under the License.
import collections.abc
import re
from typing import Any, Mapping, Union
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Mapping,
    Optional,
    Union,
)

from frozendict import frozendict

from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.frozenutils import unfreeze

from . import EventBase

if TYPE_CHECKING:
    from synapse.server import HomeServer

# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
# by a match for 'stuff'.
@@ -65,7 +79,7 @@ def prune_event(event: EventBase) -> EventBase:
    return pruned_event


def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
    """Redacts the event_dict in the same way as `prune_event`, except it
    operates on dicts rather than event objects

@@ -97,7 +111,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:

    new_content = {}

    def add_fields(*fields):
    def add_fields(*fields: str) -> None:
        for field in fields:
            if field in event_dict["content"]:
                new_content[field] = event_dict["content"][field]
@@ -151,7 +165,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:

    allowed_fields["content"] = new_content

    unsigned = {}
    unsigned: JsonDict = {}
    allowed_fields["unsigned"] = unsigned

    event_unsigned = event_dict.get("unsigned", {})
@@ -164,16 +178,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
    return allowed_fields


def _copy_field(src, dst, field):
def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None:
    """Copy the field in 'src' to 'dst'.

    For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
    then dst={"foo":{"bar":5}}.

    Args:
        src(dict): The dict to read from.
        dst(dict): The dict to modify.
        field(list<str>): List of keys to drill down to in 'src'.
        src: The dict to read from.
        dst: The dict to modify.
        field: List of keys to drill down to in 'src'.
    """
    if len(field) == 0:  # this should be impossible
        return

@@ -205,7 +219,7 @@ def _copy_field(src, dst, field):
            sub_out_dict[key_to_move] = sub_dict[key_to_move]


def only_fields(dictionary, fields):
def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
    """Return a new dict with only the fields in 'dictionary' which are present
    in 'fields'.

@@ -215,11 +229,11 @@ def only_fields(dictionary, fields):
    A literal '.' character in a field name may be escaped using a '\'.

    Args:
        dictionary(dict): The dictionary to read from.
        fields(list<str>): A list of fields to copy over. Only shallow refs are
        dictionary: The dictionary to read from.
        fields: A list of fields to copy over. Only shallow refs are
            taken.
    Returns:
        dict: A new dictionary with only the given fields. If fields was empty,
        A new dictionary with only the given fields. If fields was empty,
            the same dictionary is returned.
    """
    if len(fields) == 0:
@@ -235,17 +249,17 @@ def only_fields(dictionary, fields):
        [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
    ]

    output = {}
    output: JsonDict = {}
    for field_array in split_fields:
        _copy_field(dictionary, output, field_array)
    return output
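
An illustrative call showing the escaped-dot syntax described in the docstring above:

    event = {"content": {"m.relates_to": {"rel_type": "m.thread"}}}
    only_fields(event, [r"content.m\.relates_to"])
    # -> {"content": {"m.relates_to": {"rel_type": "m.thread"}}}
    # The field splits into ["content", "m.relates_to"]: the first "." is a
    # separator, while the escaped "\." is a literal dot in the key name.
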


def format_event_raw(d):
def format_event_raw(d: JsonDict) -> JsonDict:
    return d


def format_event_for_client_v1(d):
def format_event_for_client_v1(d: JsonDict) -> JsonDict:
    d = format_event_for_client_v2(d)

    sender = d.get("sender")
@@ -267,7 +281,7 @@ def format_event_for_client_v1(d):
    return d


def format_event_for_client_v2(d):
def format_event_for_client_v2(d: JsonDict) -> JsonDict:
    drop_keys = (
        "auth_events",
        "prev_events",
@@ -282,37 +296,37 @@ def format_event_for_client_v2(d):
    return d


def format_event_for_client_v2_without_room_id(d):
def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
    d = format_event_for_client_v2(d)
    d.pop("room_id", None)
    return d


def serialize_event(
    e,
    time_now_ms,
    as_client_event=True,
    event_format=format_event_for_client_v1,
    token_id=None,
    only_event_fields=None,
    include_stripped_room_state=False,
):
    e: Union[JsonDict, EventBase],
    time_now_ms: int,
    as_client_event: bool = True,
    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
    token_id: Optional[str] = None,
    only_event_fields: Optional[List[str]] = None,
    include_stripped_room_state: bool = False,
) -> JsonDict:
    """Serialize event for clients

    Args:
        e (EventBase)
        time_now_ms (int)
        as_client_event (bool)
        e
        time_now_ms
        as_client_event
        event_format
        token_id
        only_event_fields
        include_stripped_room_state (bool): Some events can have stripped room state
        include_stripped_room_state: Some events can have stripped room state
            stored in the `unsigned` field. This is required for invite and knock
            functionality. If this option is False, that state will be removed from the
            event before it is returned. Otherwise, it will be kept.

    Returns:
        dict
        The serialized event dictionary.
    """

    # FIXME(erikj): To handle the case of presence events and the like
@@ -369,25 +383,28 @@ class EventClientSerializer:
    clients.
    """

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()
        self.experimental_msc1849_support_enabled = (
            hs.config.experimental_msc1849_support_enabled
        )
        self._msc1849_enabled = hs.config.experimental.msc1849_enabled
        self._msc3440_enabled = hs.config.experimental.msc3440_enabled

    async def serialize_event(
        self, event, time_now, bundle_aggregations=True, **kwargs
    ):
        self,
        event: Union[JsonDict, EventBase],
        time_now: int,
        bundle_aggregations: bool = True,
        **kwargs: Any,
    ) -> JsonDict:
        """Serializes a single event.

        Args:
            event (EventBase)
            time_now (int): The current time in milliseconds
            bundle_aggregations (bool): Whether to bundle in related events
            event
            time_now: The current time in milliseconds
            bundle_aggregations: Whether to bundle in related events
            **kwargs: Arguments to pass to `serialize_event`

        Returns:
            dict: The serialized event
            The serialized event
        """
        # To handle the case of presence events and the like
        if not isinstance(event, EventBase):
@@ -400,7 +417,7 @@ class EventClientSerializer:
        # we need to bundle in with the event.
        # Do not bundle relations if the event has been redacted
        if not event.internal_metadata.is_redacted() and (
            self.experimental_msc1849_support_enabled and bundle_aggregations
            self._msc1849_enabled and bundle_aggregations
        ):
            annotations = await self.store.get_aggregation_groups_for_event(event_id)
            references = await self.store.get_relations_for_event(
@@ -446,27 +463,45 @@ class EventClientSerializer:
                "sender": edit.sender,
            }

        # If this event is the start of a thread, include a summary of the replies.
        if self._msc3440_enabled:
            (
                thread_count,
                latest_thread_event,
            ) = await self.store.get_thread_summary(event_id)
            if latest_thread_event:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.THREAD] = {
                    # Don't bundle aggregations as this could recurse forever.
                    "latest_event": await self.serialize_event(
                        latest_thread_event, time_now, bundle_aggregations=False
                    ),
                    "count": thread_count,
                }

        return serialized_event
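
For reference, the MSC3440 branch above leaves the serialized event shaped roughly like this (the count and nesting are invented for illustration):

    # serialized_event["unsigned"]["m.relations"] == {
    #     "m.thread": {
    #         "latest_event": {...},  # serialized with bundle_aggregations=False
    #         "count": 2,             # number of events in the thread
    #     }
    # }
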

    def serialize_events(self, events, time_now, **kwargs):
    async def serialize_events(
        self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
    ) -> List[JsonDict]:
        """Serializes multiple events.

        Args:
            event (iter[EventBase])
            time_now (int): The current time in milliseconds
            events
            time_now: The current time in milliseconds
            **kwargs: Arguments to pass to `serialize_event`

        Returns:
            Deferred[list[dict]]: The list of serialized events
            The list of serialized events
        """
        return yieldable_gather_results(
        return await yieldable_gather_results(
            self.serialize_event, events, time_now=time_now, **kwargs
        )


def copy_power_levels_contents(
    old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
):
) -> Dict[str, Union[int, Dict[str, int]]]:
    """Copy the content of a power_levels event, unfreezing frozendicts along the way

    Raises:
@@ -475,7 +510,7 @@ def copy_power_levels_contents(
    if not isinstance(old_power_levels, collections.abc.Mapping):
        raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

    power_levels = {}
    power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
    for k, v in old_power_levels.items():

        if isinstance(v, int):
@@ -483,7 +518,8 @@ def copy_power_levels_contents(
            continue

        if isinstance(v, collections.abc.Mapping):
            power_levels[k] = h = {}
            h: Dict[str, int] = {}
            power_levels[k] = h
            for k1, v1 in v.items():
                # we should only have one level of nesting
                if not isinstance(v1, int):
@@ -498,7 +534,7 @@ def copy_power_levels_contents(
    return power_levels
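
A small example of the unfreezing deep copy documented above, with invented values:

    from frozendict import frozendict

    old = frozendict({"ban": 50, "users": frozendict({"@admin:example.org": 100})})
    new = copy_power_levels_contents(old)
    # new == {"ban": 50, "users": {"@admin:example.org": 100}}, rebuilt as
    # plain mutable dicts, so callers can modify the copy safely.
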


def validate_canonicaljson(value: Any):
def validate_canonicaljson(value: Any) -> None:
    """
    Ensure that the JSON object is valid according to the rules of canonical JSON.


@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from typing import Union
from typing import Iterable, Union

import jsonschema

@@ -28,11 +28,11 @@ from synapse.events.utils import (
    validate_canonicaljson,
)
from synapse.federation.federation_server import server_matches_acl_event
from synapse.types import EventID, RoomID, UserID
from synapse.types import EventID, JsonDict, RoomID, UserID


class EventValidator:
    def validate_new(self, event: EventBase, config: HomeServerConfig):
    def validate_new(self, event: EventBase, config: HomeServerConfig) -> None:
        """Validates the event has roughly the right format

        Args:
@@ -116,7 +116,7 @@ class EventValidator:
                    errcode=Codes.BAD_JSON,
                )

    def _validate_retention(self, event: EventBase):
    def _validate_retention(self, event: EventBase) -> None:
        """Checks that an event that defines the retention policy for a room respects the
        format enforced by the spec.

@@ -156,7 +156,7 @@ class EventValidator:
                    errcode=Codes.BAD_JSON,
                )

    def validate_builder(self, event: Union[EventBase, EventBuilder]):
    def validate_builder(self, event: Union[EventBase, EventBuilder]) -> None:
        """Validates that the builder/event has roughly the right format. Only
        checks values that we expect a proto event to have, rather than all the
        fields an event would have
@@ -204,14 +204,14 @@ class EventValidator:

            self._ensure_state_event(event)

    def _ensure_strings(self, d, keys):
    def _ensure_strings(self, d: JsonDict, keys: Iterable[str]) -> None:
        for s in keys:
            if s not in d:
                raise SynapseError(400, "'%s' not in content" % (s,))
            if not isinstance(d[s], str):
                raise SynapseError(400, "'%s' not a string type" % (s,))
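
Illustrative use of the helper above; the content keys shown are hypothetical:

    self._ensure_strings(event.content, ["body", "msgtype"])
    # raises SynapseError(400, "'body' not in content") if a key is absent, or
    # SynapseError(400, "'body' not a string type") if present but not a str
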

    def _ensure_state_event(self, event):
    def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None:
        if not event.is_state():
            raise SynapseError(400, "'%s' must be state events" % (event.type,))

@@ -244,7 +244,9 @@ POWER_LEVELS_SCHEMA = {
}


def _create_power_level_validator():
# This could return something newer than Draft 7, but that's the current "latest"
# validator.
def _create_power_level_validator() -> jsonschema.Draft7Validator:
    validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)

    # by default jsonschema does not consider a frozendict to be an object so

@@ -14,6 +14,7 @@
# limitations under the License.
import logging
from collections import namedtuple
from typing import TYPE_CHECKING

from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -25,11 +26,15 @@ from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
from synapse.types import JsonDict, get_domain_from_id

if TYPE_CHECKING:
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


class FederationBase:
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.hs = hs

        self.server_name = hs.hostname

@@ -467,7 +467,7 @@ class FederationServer(FederationBase):

    async def on_room_state_request(
        self, origin: str, room_id: str, event_id: Optional[str]
    ) -> Tuple[int, Dict[str, Any]]:
    ) -> Tuple[int, JsonDict]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

@@ -481,7 +481,7 @@ class FederationServer(FederationBase):
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (await self._server_linearizer.queue((origin, room_id))):
            resp = dict(
            resp: JsonDict = dict(
                await self._state_resp_cache.wrap(
                    (room_id, event_id),
                    self._on_context_state_request_compute,
@@ -1008,7 +1008,10 @@ class FederationServer(FederationBase):
            async with lock:
                logger.info("handling received PDU: %s", event)
                try:
                    await self._federation_event_handler.on_receive_pdu(origin, event)
                    with nested_logging_context(event.event_id):
                        await self._federation_event_handler.on_receive_pdu(
                            origin, event
                        )
                except FederationError as e:
                    # XXX: Ideally we'd inform the remote we failed to process
                    # the event, but we can't return an error in the transaction
@@ -1058,11 +1061,12 @@ class FederationServer(FederationBase):

            origin, event = next

            lock = await self.store.try_acquire_lock(
            new_lock = await self.store.try_acquire_lock(
                _INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
            )
            if not lock:
            if not new_lock:
                return
            lock = new_lock

    def __str__(self) -> str:
        return "<ReplicationLayer(%s)>" % self.server_name

@@ -117,7 +117,7 @@ class PublicRoomList(BaseFederationServlet):
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        self.handler = hs.get_room_list_handler()
        self.allow_access = hs.config.allow_public_rooms_over_federation
        self.allow_access = hs.config.server.allow_public_rooms_over_federation

    async def on_GET(
        self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]]

@@ -1,120 +0,0 @@
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Optional

from synapse.api.ratelimiting import Ratelimiter
from synapse.types import Requester

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class BaseHandler:
    """
    Common base class for the event handlers.

    Deprecated: new code should not use this. Instead, Handler classes should define the
    fields they actually need. The utility methods should either be factored out to
    standalone helper functions, or to different Handler classes.
    """

    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
        self.notifier = hs.get_notifier()
        self.state_handler = hs.get_state_handler()
        self.distributor = hs.get_distributor()
        self.clock = hs.get_clock()
        self.hs = hs

        # The rate_hz and burst_count are overridden on a per-user basis
        self.request_ratelimiter = Ratelimiter(
            store=self.store, clock=self.clock, rate_hz=0, burst_count=0
        )
        self._rc_message = self.hs.config.ratelimiting.rc_message

        # Check whether ratelimiting room admin message redaction is enabled
        # by the presence of rate limits in the config
        if self.hs.config.ratelimiting.rc_admin_redaction:
            self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
                store=self.store,
                clock=self.clock,
                rate_hz=self.hs.config.ratelimiting.rc_admin_redaction.per_second,
                burst_count=self.hs.config.ratelimiting.rc_admin_redaction.burst_count,
            )
        else:
            self.admin_redaction_ratelimiter = None

        self.server_name = hs.hostname

        self.event_builder_factory = hs.get_event_builder_factory()

    async def ratelimit(
        self,
        requester: Requester,
        update: bool = True,
        is_admin_redaction: bool = False,
    ) -> None:
        """Ratelimits requests.

        Args:
            requester
            update: Whether to record that a request is being processed.
                Set to False when doing multiple checks for one request (e.g.
                to check up front if we would reject the request), and set to
                True for the last call for a given request.
            is_admin_redaction: Whether this is a room admin/moderator
                redacting an event. If so then we may apply different
                ratelimits depending on config.

        Raises:
            LimitExceededError if the request should be ratelimited
        """
        user_id = requester.user.to_string()

        # The AS user itself is never rate limited.
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service is not None:
            return  # do not ratelimit app service senders

        messages_per_second = self._rc_message.per_second
        burst_count = self._rc_message.burst_count

        # Check if there is a per user override in the DB.
        override = await self.store.get_ratelimit_for_user(user_id)
        if override:
            # If overridden with a null Hz then ratelimiting has been entirely
            # disabled for the user
            if not override.messages_per_second:
                return

            messages_per_second = override.messages_per_second
            burst_count = override.burst_count

        if is_admin_redaction and self.admin_redaction_ratelimiter:
            # If we have separate config for admin redactions, use a separate
            # ratelimiter as to not have user_ids clash
            await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
        else:
            # Override rate and burst count per-user
            await self.request_ratelimiter.ratelimit(
                requester,
                rate_hz=messages_per_second,
                burst_count=burst_count,
                update=update,
            )
@@ -67,12 +67,8 @@ class AccountValidityHandler:
            and self._account_validity_renew_by_email_enabled
        ):
            # Don't do email-specific configuration if renewal by email is disabled.
            self._template_html = (
                hs.config.account_validity.account_validity_template_html
            )
            self._template_text = (
                hs.config.account_validity.account_validity_template_text
            )
            self._template_html = hs.config.email.account_validity_template_html
            self._template_text = hs.config.email.account_validity_template_text
            self._renew_email_subject = (
                hs.config.account_validity.account_validity_renew_email_subject
            )

@@ -21,18 +21,15 @@ from synapse.events import EventBase
from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID
from synapse.visibility import filter_events_for_client

from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class AdminHandler(BaseHandler):
class AdminHandler:
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state


@@ -185,19 +185,26 @@ class ApplicationServicesHandler:
        new_token: Optional[int],
        users: Optional[Collection[Union[str, UserID]]] = None,
    ) -> None:
        """This is called by the notifier in the background
        when a ephemeral event handled by the homeserver.
        """
        This is called by the notifier in the background when an ephemeral event is handled
        by the homeserver.

        This will determine which appservices
        are interested in the event, and submit them.

        Events will only be pushed to appservices
        that have opted into ephemeral events
        This will determine which appservices are interested in the event, and submit them.

        Args:
            stream_key: The stream the event came from.
            new_token: The latest stream token
            users: The user(s) involved with the event.

            `stream_key` can be "typing_key", "receipt_key" or "presence_key". Any other
            value for `stream_key` will cause this function to return early.

            Ephemeral events will only be pushed to appservices that have opted into
            them.

            Appservices will only receive ephemeral events that fall within their
            registered user and room namespaces.

            new_token: The latest stream token.
            users: The users that should be informed of the new event, if any.
        """
        if not self.notify_appservices:
            return
@@ -232,21 +239,32 @@ class ApplicationServicesHandler:
        for service in services:
            # Only handle typing if we have the latest token
            if stream_key == "typing_key" and new_token is not None:
                # Note that we don't persist the token (via set_type_stream_id_for_appservice)
                # for typing_key due to performance reasons and due to their highly
                # ephemeral nature.
                #
                # Instead we simply grab the latest typing updates in _handle_typing
                # and, if they apply to this application service, send it off.
                events = await self._handle_typing(service, new_token)
                if events:
                    self.scheduler.submit_ephemeral_events_for_as(service, events)
                # We don't persist the token for typing_key for performance reasons

            elif stream_key == "receipt_key":
                events = await self._handle_receipts(service)
                if events:
                    self.scheduler.submit_ephemeral_events_for_as(service, events)

                # Persist the latest handled stream token for this appservice
                await self.store.set_type_stream_id_for_appservice(
                    service, "read_receipt", new_token
                )

            elif stream_key == "presence_key":
                events = await self._handle_presence(service, users)
                if events:
                    self.scheduler.submit_ephemeral_events_for_as(service, events)

                # Persist the latest handled stream token for this appservice
                await self.store.set_type_stream_id_for_appservice(
                    service, "presence", new_token
                )
@@ -254,18 +272,54 @@ class ApplicationServicesHandler:
    async def _handle_typing(
        self, service: ApplicationService, new_token: int
    ) -> List[JsonDict]:
        """
        Return the typing events since the given stream token that the given application
        service should receive.

        First fetch all typing events between the given typing stream token (non-inclusive)
        and the latest typing event stream token (inclusive). Then return only those typing
        events that the given application service may be interested in.

        Args:
            service: The application service to check for which events it should receive.
            new_token: A typing event stream token.

        Returns:
            A list of JSON dictionaries containing data derived from the typing events that
            should be sent to the given application service.
        """
        typing_source = self.event_sources.sources.typing
        # Get the typing events from just before current
        typing, _ = await typing_source.get_new_events_as(
            service=service,
            # For performance reasons, we don't persist the previous
            # token in the DB and instead fetch the latest typing information
            # token in the DB and instead fetch the latest typing event
            # for appservices.
            # TODO: It'd likely be more efficient to simply fetch the
            # typing event with the given 'new_token' stream token and
            # check if the given service was interested, rather than
            # iterating over all typing events and only grabbing the
            # latest few.
            from_key=new_token - 1,
        )
        return typing

    async def _handle_receipts(self, service: ApplicationService) -> List[JsonDict]:
        """
        Return the latest read receipts that the given application service should receive.

        First fetch all read receipts between the last receipt stream token that this
        application service should have previously received (non-inclusive) and the
        latest read receipt stream token (inclusive). Then from that set, return only
        those read receipts that the given application service may be interested in.

        Args:
            service: The application service to check for which events it should receive.

        Returns:
            A list of JSON dictionaries containing data derived from the read receipts that
            should be sent to the given application service.
        """
        from_key = await self.store.get_type_stream_id_for_appservice(
            service, "read_receipt"
        )
@@ -278,6 +332,22 @@ class ApplicationServicesHandler:
    async def _handle_presence(
        self, service: ApplicationService, users: Collection[Union[str, UserID]]
    ) -> List[JsonDict]:
        """
        Return the latest presence updates that the given application service should receive.

        First, filter the given users list to those that the application service is
        interested in. Then retrieve the latest presence updates since the
        last-known previously received presence stream token for the given
        application service. Return those presence updates.

        Args:
            service: The application service that ephemeral events are being sent to.
            users: The users that should receive the presence update.

        Returns:
            A list of json dictionaries containing data derived from the presence events
            that should be sent to the given application service.
        """
        events: List[JsonDict] = []
        presence_source = self.event_sources.sources.presence
        from_key = await self.store.get_type_stream_id_for_appservice(
@@ -290,9 +360,9 @@ class ApplicationServicesHandler:
            interested = await service.is_interested_in_presence(user, self.store)
            if not interested:
                continue

            presence_events, _ = await presence_source.get_new_events(
                user=user,
                service=service,
                from_key=from_key,
            )
            time_now = self.clock.time_msec()

@@ -52,7 +52,6 @@ from synapse.api.errors import (
    UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.handlers._base import BaseHandler
from synapse.handlers.ui_auth import (
    INTERACTIVE_AUTH_CHECKERS,
    UIAuthSessionDataConstants,
@@ -63,7 +62,6 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import ModuleApi
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
@@ -74,6 +72,7 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email

if TYPE_CHECKING:
    from synapse.module_api import ModuleApi
    from synapse.rest.client.login import LoginResponse
    from synapse.server import HomeServer

@@ -186,60 +185,28 @@ class LoginTokenAttributes:
    auth_provider_id = attr.ib(type=str)


class AuthHandler(BaseHandler):
class AuthHandler:
    SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.checkers: Dict[str, UserInteractiveAuthChecker] = {}
        for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
            inst = auth_checker_class(hs)
            if inst.is_enabled():
                self.checkers[inst.AUTH_TYPE] = inst  # type: ignore

        self.bcrypt_rounds = hs.config.bcrypt_rounds
        self.bcrypt_rounds = hs.config.registration.bcrypt_rounds

        # we can't use hs.get_module_api() here, because to do so will create an
        # import loop.
        #
        # TODO: refactor this class to separate the lower-level stuff that
        # ModuleApi can use from the higher-level stuff that uses ModuleApi, as
        # better way to break the loop
        account_handler = ModuleApi(hs, self)

        self.password_providers = [
            PasswordProvider.load(module, config, account_handler)
            for module, config in hs.config.authproviders.password_providers
        ]

        logger.info("Extra password_providers: %s", self.password_providers)
        self.password_auth_provider = hs.get_password_auth_provider()

        self.hs = hs  # FIXME better possibility to access registrationHandler later?
        self.macaroon_gen = hs.get_macaroon_generator()
        self._password_enabled = hs.config.auth.password_enabled
        self._password_localdb_enabled = hs.config.auth.password_localdb_enabled

        # start out by assuming PASSWORD is enabled; we will remove it later if not.
        login_types = set()
        if self._password_localdb_enabled:
            login_types.add(LoginType.PASSWORD)

        for provider in self.password_providers:
            login_types.update(provider.get_supported_login_types().keys())

        if not self._password_enabled:
            login_types.discard(LoginType.PASSWORD)

        # Some clients just pick the first type in the list. In this case, we want
        # them to use PASSWORD (rather than token or whatever), so we want to make sure
        # that comes first, where it's present.
        self._supported_login_types = []
        if LoginType.PASSWORD in login_types:
            self._supported_login_types.append(LoginType.PASSWORD)
            login_types.remove(LoginType.PASSWORD)
        self._supported_login_types.extend(login_types)

        # Ratelimiter for failed auth during UIA. Uses same ratelimit config
        # as per `rc_login.failed_attempts`.
        self._failed_uia_attempts_ratelimiter = Ratelimiter(
@@ -427,11 +394,10 @@ class AuthHandler(BaseHandler):
            ui_auth_types.add(LoginType.PASSWORD)

        # also allow auth from password providers
        for provider in self.password_providers:
            for t in provider.get_supported_login_types().keys():
                if t == LoginType.PASSWORD and not self._password_enabled:
                    continue
                ui_auth_types.add(t)
        for t in self.password_auth_provider.get_supported_login_types().keys():
            if t == LoginType.PASSWORD and not self._password_enabled:
                continue
            ui_auth_types.add(t)

        # if sso is enabled, allow the user to log in via SSO iff they have a mapping
        # from sso to mxid.
@@ -1038,7 +1004,25 @@ class AuthHandler(BaseHandler):
        Returns:
            login types
        """
        return self._supported_login_types
        # Load any login types registered by modules
        # This is stored in the password_auth_provider so this doesn't trigger
        # any callbacks
        types = list(self.password_auth_provider.get_supported_login_types().keys())

        # This list should include PASSWORD if (either _password_localdb_enabled is
        # true or if one of the modules registered it) AND _password_enabled is true
        # Also:
        # Some clients just pick the first type in the list. In this case, we want
        # them to use PASSWORD (rather than token or whatever), so we want to make sure
        # that comes first, where it's present.
        if LoginType.PASSWORD in types:
            types.remove(LoginType.PASSWORD)
            if self._password_enabled:
                types.insert(0, LoginType.PASSWORD)
        elif self._password_localdb_enabled and self._password_enabled:
            types.insert(0, LoginType.PASSWORD)

        return types
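
The net effect of the reordering above, sketched with an invented module-registered type:

    # module registers {"m.login.password": ..., "org.example.custom": ...}:
    #   password login enabled  -> ["m.login.password", "org.example.custom"]
    #   password login disabled -> ["org.example.custom"]  (PASSWORD dropped)
    # If no module registers PASSWORD but the local password database and
    # password login are both enabled, PASSWORD is still inserted first.
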

    async def validate_login(
        self,
@@ -1217,15 +1201,20 @@ class AuthHandler(BaseHandler):

        known_login_type = False

        for provider in self.password_providers:
            supported_login_types = provider.get_supported_login_types()
            if login_type not in supported_login_types:
                # this password provider doesn't understand this login type
                continue

        # Check if login_type matches a type registered by one of the modules
        # We don't need to remove LoginType.PASSWORD from the list if password login is
        # disabled, since if that were the case then by this point we know that the
        # login_type is not LoginType.PASSWORD
        supported_login_types = self.password_auth_provider.get_supported_login_types()
        # check if the login type being used is supported by a module
        if login_type in supported_login_types:
            # Make a note that this login type is supported by the server
            known_login_type = True
            # Get all the fields expected for this login type
            login_fields = supported_login_types[login_type]

            # go through the login submission and keep track of which required fields are
            # provided/not provided
            missing_fields = []
            login_dict = {}
            for f in login_fields:
@@ -1233,6 +1222,7 @@ class AuthHandler(BaseHandler):
                    missing_fields.append(f)
                else:
                    login_dict[f] = login_submission[f]
            # raise an error if any of the expected fields for that login type weren't provided
            if missing_fields:
                raise SynapseError(
                    400,
@@ -1240,10 +1230,15 @@ class AuthHandler(BaseHandler):
                    % (login_type, missing_fields),
                )

            result = await provider.check_auth(username, login_type, login_dict)
            # call all of the check_auth hooks for that login_type
            # it will return a result once the first success is found (or None otherwise)
            result = await self.password_auth_provider.check_auth(
                username, login_type, login_dict
            )
            if result:
                return result

        # if no module managed to authenticate the user, then fallback to built in password based auth
        if login_type == LoginType.PASSWORD and self._password_localdb_enabled:
            known_login_type = True

@@ -1282,11 +1277,16 @@ class AuthHandler(BaseHandler):
            completed login/registration, or `None`. If authentication was
            unsuccessful, `user_id` and `callback` are both `None`.
        """
        for provider in self.password_providers:
            result = await provider.check_3pid_auth(medium, address, password)
            if result:
                return result
        # call all of the check_3pid_auth callbacks
        # Result will be from the first callback that returns something other than None
        # If all the callbacks return None, then result is also set to None
        result = await self.password_auth_provider.check_3pid_auth(
            medium, address, password
        )
        if result:
            return result

        # if result is None then return (None, None)
        return None, None
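
A hypothetical legacy-provider check_3pid_auth callback feeding the aggregated hook above (the credentials and user ID are invented):

    async def check_3pid_auth(medium: str, address: str, password: str):
        # Return (user_id, optional post-login callback) on success, else None.
        if medium == "email" and address == "alice@example.com":
            if password == "correct-horse-battery-staple":
                return "@alice:example.org", None
        return None
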
|
||||
|
||||
async def _check_local_password(self, user_id: str, password: str) -> Optional[str]:
|
||||
@@ -1365,13 +1365,12 @@ class AuthHandler(BaseHandler):
         user_info = await self.auth.get_user_by_access_token(access_token)
         await self.store.delete_access_token(access_token)

-        # see if any of our auth providers want to know about this
-        for provider in self.password_providers:
-            await provider.on_logged_out(
-                user_id=user_info.user_id,
-                device_id=user_info.device_id,
-                access_token=access_token,
-            )
+        # see if any modules want to know about this
+        await self.password_auth_provider.on_logged_out(
+            user_id=user_info.user_id,
+            device_id=user_info.device_id,
+            access_token=access_token,
+        )

         # delete pushers associated with this access token
         if user_info.token_id is not None:
@@ -1398,12 +1397,11 @@ class AuthHandler(BaseHandler):
             user_id, except_token_id=except_token_id, device_id=device_id
         )

-        # see if any of our auth providers want to know about this
-        for provider in self.password_providers:
-            for token, _, device_id in tokens_and_devices:
-                await provider.on_logged_out(
-                    user_id=user_id, device_id=device_id, access_token=token
-                )
+        # see if any modules want to know about this
+        for token, _, device_id in tokens_and_devices:
+            await self.password_auth_provider.on_logged_out(
+                user_id=user_id, device_id=device_id, access_token=token
+            )

         # delete pushers associated with the access tokens
         await self.hs.get_pusherpool().remove_pushers_by_access_token(
@@ -1811,40 +1809,230 @@ class MacaroonGenerator:
         return macaroon


-class PasswordProvider:
-    """Wrapper for a password auth provider module
+def load_legacy_password_auth_providers(hs: "HomeServer") -> None:
+    module_api = hs.get_module_api()
+    for module, config in hs.config.authproviders.password_providers:
+        load_single_legacy_password_auth_provider(
+            module=module, config=config, api=module_api
+        )

-    This class abstracts out all of the backwards-compatibility hacks for
-    password providers, to provide a consistent interface.

+def load_single_legacy_password_auth_provider(
+    module: Type,
+    config: JsonDict,
+    api: "ModuleApi",
+) -> None:
+    try:
+        provider = module(config=config, account_handler=api)
+    except Exception as e:
+        logger.error("Error while initializing %r: %s", module, e)
+        raise

+    # The known hooks. If a module implements a method whose name appears in this set
+    # we'll want to register it
+    password_auth_provider_methods = {
+        "check_3pid_auth",
+        "on_logged_out",
+    }

+    # All methods that the module provides should be async, but this wasn't enforced
+    # in the old module system, so we wrap them if needed
+    def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
+        # f might be None if the callback isn't implemented by the module. In this
+        # case we don't want to register a callback at all so we return None.
+        if f is None:
+            return None

+        # We need to wrap check_password because its old form would return a boolean
+        # but we now want it to behave just like check_auth() and return the matrix id of
+        # the user if authentication succeeded or None otherwise
+        if f.__name__ == "check_password":

+            async def wrapped_check_password(
+                username: str, login_type: str, login_dict: JsonDict
+            ) -> Optional[Tuple[str, Optional[Callable]]]:
+                # We've already made sure f is not None above, but mypy doesn't do well
+                # across function boundaries so we need to tell it f is definitely not
+                # None.
+                assert f is not None

+                matrix_user_id = api.get_qualified_user_id(username)
+                password = login_dict["password"]

+                is_valid = await f(matrix_user_id, password)

+                if is_valid:
+                    return matrix_user_id, None

+                return None

+            return wrapped_check_password
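A minimal, runnable restatement of what this wrapping achieves (all names and values invented): the legacy bool-returning `check_password` becomes a coroutine that yields `(mxid, None)` on success and `None` on failure.

```python
import asyncio
from typing import Optional, Tuple

async def legacy_check_password(mxid: str, password: str) -> bool:
    return password == "hunter2"  # stand-in for a real module's lookup

def get_qualified_user_id(username: str) -> str:
    return f"@{username}:example.org"  # stand-in for api.get_qualified_user_id

async def wrapped(
    username: str, login_type: str, login_dict: dict
) -> Optional[Tuple[str, None]]:
    mxid = get_qualified_user_id(username)
    if await legacy_check_password(mxid, login_dict["password"]):
        return mxid, None
    return None

assert asyncio.run(
    wrapped("bob", "m.login.password", {"password": "hunter2"})
) == ("@bob:example.org", None)
assert asyncio.run(wrapped("bob", "m.login.password", {"password": "wrong"})) is None
```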
+        # We need to wrap check_auth as in the old form it could return
+        # just a str, but now it must return Optional[Tuple[str, Optional[Callable]]]
+        if f.__name__ == "check_auth":

+            async def wrapped_check_auth(
+                username: str, login_type: str, login_dict: JsonDict
+            ) -> Optional[Tuple[str, Optional[Callable]]]:
+                # We've already made sure f is not None above, but mypy doesn't do well
+                # across function boundaries so we need to tell it f is definitely not
+                # None.
+                assert f is not None

+                result = await f(username, login_type, login_dict)

+                if isinstance(result, str):
+                    return result, None

+                return result

+            return wrapped_check_auth

+        # We need to wrap check_3pid_auth as in the old form it could return
+        # just a str, but now it must return Optional[Tuple[str, Optional[Callable]]]
+        if f.__name__ == "check_3pid_auth":

+            async def wrapped_check_3pid_auth(
+                medium: str, address: str, password: str
+            ) -> Optional[Tuple[str, Optional[Callable]]]:
+                # We've already made sure f is not None above, but mypy doesn't do well
+                # across function boundaries so we need to tell it f is definitely not
+                # None.
+                assert f is not None

+                result = await f(medium, address, password)

+                if isinstance(result, str):
+                    return result, None

+                return result

+            return wrapped_check_3pid_auth

+        def run(*args: Tuple, **kwargs: Dict) -> Awaitable:
+            # mypy doesn't do well across function boundaries so we need to tell it
+            # f is definitely not None.
+            assert f is not None

+            return maybe_awaitable(f(*args, **kwargs))

+        return run

+    # populate hooks with the implemented methods, wrapped with async_wrapper
+    hooks = {
+        hook: async_wrapper(getattr(provider, hook, None))
+        for hook in password_auth_provider_methods
+    }

+    supported_login_types = {}
+    # call get_supported_login_types and add that to the dict
+    g = getattr(provider, "get_supported_login_types", None)
+    if g is not None:
+        # Note the old module style also called get_supported_login_types at loading time
+        # and it is synchronous
+        supported_login_types.update(g())

+    auth_checkers = {}
+    # Legacy modules have a check_auth method which expects to be called with one of
+    # the keys returned by get_supported_login_types. New style modules register a
+    # dictionary of login_type->check_auth_method mappings
+    check_auth = async_wrapper(getattr(provider, "check_auth", None))
+    if check_auth is not None:
+        for login_type, fields in supported_login_types.items():
+            # need tuple(fields) since fields can be any Iterable type (so may not be hashable)
+            auth_checkers[(login_type, tuple(fields))] = check_auth

+    # if it has a "check_password" method then it should handle all auth checks
+    # with login type of LoginType.PASSWORD
+    check_password = async_wrapper(getattr(provider, "check_password", None))
+    if check_password is not None:
+        # need to use a tuple here for ("password",) not a list since lists aren't hashable
+        auth_checkers[(LoginType.PASSWORD, ("password",))] = check_password

+    api.register_password_auth_provider_callbacks(**hooks, auth_checkers=auth_checkers)

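For reference, a sketch of the kind of legacy provider the loader above is designed to wrap (the class name, config shape, and password data are invented for illustration):

```python
class ExampleLegacyProvider:
    """A legacy-style password provider: constructed with (config, account_handler),
    synchronous get_supported_login_types, bool-returning check_password."""

    def __init__(self, config: dict, account_handler) -> None:
        self._api = account_handler
        self._passwords = config.get("passwords", {})  # invented config shape

    @staticmethod
    def parse_config(config: dict) -> dict:
        return config

    def get_supported_login_types(self) -> dict:
        return {"m.login.password": ("password",)}

    async def check_password(self, user_id: str, password: str) -> bool:
        # Old-style contract: return True/False; the shim above converts this
        # into the new Optional[Tuple[str, Optional[Callable]]] contract.
        return self._passwords.get(user_id) == password
```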
+CHECK_3PID_AUTH_CALLBACK = Callable[
+    [str, str, str],
+    Awaitable[
+        Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
+    ],
+]
+ON_LOGGED_OUT_CALLBACK = Callable[[str, Optional[str], str], Awaitable]
+CHECK_AUTH_CALLBACK = Callable[
+    [str, str, JsonDict],
+    Awaitable[
+        Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]
+    ],
+]

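A sketch of a coroutine matching the `CHECK_AUTH_CALLBACK` shape above (user id and password invented):

```python
from typing import Any, Callable, Dict, Optional, Tuple

async def my_check_auth(
    username: str, login_type: str, login_dict: Dict[str, Any]
) -> Optional[Tuple[str, Optional[Callable]]]:
    # Return (user_id, optional post-login callback) on success, None on failure.
    if login_type == "m.login.password" and login_dict.get("password") == "hunter2":
        return "@alice:example.org", None
    return None
```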
+class PasswordAuthProvider:
+    """
+    A class that the AuthHandler calls when authenticating users
+    It allows modules to provide alternative methods for authentication
+    """

-    @classmethod
-    def load(
-        cls, module: Type, config: JsonDict, module_api: ModuleApi
-    ) -> "PasswordProvider":
-        try:
-            pp = module(config=config, account_handler=module_api)
-        except Exception as e:
-            logger.error("Error while initializing %r: %s", module, e)
-            raise
-        return cls(pp, module_api)
+    def __init__(self) -> None:
+        # lists of callbacks
+        self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = []
+        self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = []

-    def __init__(self, pp: "PasswordProvider", module_api: ModuleApi):
-        self._pp = pp
-        self._module_api = module_api
+        # Mapping from login type to login parameters
+        self._supported_login_types: Dict[str, Iterable[str]] = {}

-        self._supported_login_types = {}
+        # Mapping from login type to auth checker callbacks
+        self.auth_checker_callbacks: Dict[str, List[CHECK_AUTH_CALLBACK]] = {}

-        # grandfather in check_password support
-        if hasattr(self._pp, "check_password"):
-            self._supported_login_types[LoginType.PASSWORD] = ("password",)
+    def register_password_auth_provider_callbacks(
+        self,
+        check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None,
+        on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None,
+        auth_checkers: Optional[Dict[Tuple[str, Tuple], CHECK_AUTH_CALLBACK]] = None,
+    ) -> None:
+        # Register check_3pid_auth callback
+        if check_3pid_auth is not None:
+            self.check_3pid_auth_callbacks.append(check_3pid_auth)

-        g = getattr(self._pp, "get_supported_login_types", None)
-        if g:
-            self._supported_login_types.update(g())
+        # register on_logged_out callback
+        if on_logged_out is not None:
+            self.on_logged_out_callbacks.append(on_logged_out)

-    def __str__(self) -> str:
-        return str(self._pp)
+        if auth_checkers is not None:
+            # register a new supported login_type
+            # Iterate through all of the types being registered
+            for (login_type, fields), callback in auth_checkers.items():
+                # Note: fields may be empty here. This would allow a module's auth checker to
+                # be called with just 'login_type' and no password or other secrets

+                # Need to check that all the field names are strings or may get nasty errors later
+                for f in fields:
+                    if not isinstance(f, str):
+                        raise RuntimeError(
+                            "A module tried to register support for login type: %s with parameters %s"
+                            " but all parameter names must be strings"
+                            % (login_type, fields)
+                        )

+                # 2 modules supporting the same login type must expect the same fields
+                # e.g. 1 can't expect "pass" if the other expects "password"
+                # so throw an exception if that happens
+                if login_type not in self._supported_login_types:
+                    self._supported_login_types[login_type] = fields
+                else:
+                    fields_currently_supported = self._supported_login_types.get(
+                        login_type
+                    )
+                    if fields_currently_supported != fields:
+                        raise RuntimeError(
+                            "A module tried to register support for login type: %s with parameters %s"
+                            " but another module had already registered support for that type with parameters %s"
+                            % (login_type, fields, fields_currently_supported)
+                        )

+                # Add the new method to the list of auth_checker_callbacks for this login type
+                self.auth_checker_callbacks.setdefault(login_type, []).append(callback)

     def get_supported_login_types(self) -> Mapping[str, Iterable[str]]:
         """Get the login types supported by this password provider
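Under the new interface, a module would wire these hooks up roughly as follows. This is a sketch: `api` is assumed to be the `ModuleApi` instance handed to the module at startup, the callbacks are stubs, and the `ModuleApi` method is assumed to mirror the signature shown above (which is how the legacy shim calls it).

```python
from typing import Any, Callable, Dict, Optional, Tuple

async def my_check_auth(
    username: str, login_type: str, login_dict: Dict[str, Any]
) -> Optional[Tuple[str, Optional[Callable]]]:
    return None  # stub: always reject

async def my_on_logged_out(
    user_id: str, device_id: Optional[str], access_token: str
) -> None:
    print(f"{user_id} logged out of device {device_id}")

# `api` is the ModuleApi given to the module; this mirrors what the legacy
# shim does programmatically for old-style modules.
api.register_password_auth_provider_callbacks(
    on_logged_out=my_on_logged_out,
    auth_checkers={("m.login.password", ("password",)): my_check_auth},
)
```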
@@ -1852,20 +2040,15 @@ class PasswordProvider:
         Returns a map from a login type identifier (such as m.login.password) to an
         iterable giving the fields which must be provided by the user in the submission
         to the /login API.
-
-        This wrapper adds m.login.password to the list if the underlying password
-        provider supports the check_password() api.
         """

         return self._supported_login_types

     async def check_auth(
         self, username: str, login_type: str, login_dict: JsonDict
-    ) -> Optional[Tuple[str, Optional[Callable]]]:
+    ) -> Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]:
         """Check if the user has presented valid login credentials

-        This wrapper also calls check_password() if the underlying password provider
-        supports the check_password() api and the login type is m.login.password.

         Args:
             username: user id presented by the client. Either an MXID or an unqualified
                 username.
@@ -1879,63 +2062,130 @@ class PasswordProvider:
             user, and `callback` is an optional callback which will be called with the
             result from the /login call (including access_token, device_id, etc.)
         """
-        # first grandfather in a call to check_password
-        if login_type == LoginType.PASSWORD:
-            check_password = getattr(self._pp, "check_password", None)
-            if check_password:
-                qualified_user_id = self._module_api.get_qualified_user_id(username)
-                is_valid = await check_password(
-                    qualified_user_id, login_dict["password"]
-                )
-                if is_valid:
-                    return qualified_user_id, None

-        check_auth = getattr(self._pp, "check_auth", None)
-        if not check_auth:
-            return None
-        result = await check_auth(username, login_type, login_dict)
+        # Go through all callbacks for the login type until one returns with a value
+        # other than None (i.e. until a callback returns a success)
+        for callback in self.auth_checker_callbacks[login_type]:
+            try:
+                result = await callback(username, login_type, login_dict)
+            except Exception as e:
+                logger.warning("Failed to run module API callback %s: %s", callback, e)
+                continue

-        # Check if the return value is a str or a tuple
-        if isinstance(result, str):
-            # If it's a str, set callback function to None
-            return result, None
+            if result is not None:
+                # Check that the callback returned a Tuple[str, Optional[Callable]]
+                # "type: ignore[unreachable]" is used after some isinstance checks because mypy thinks
+                # result is always the right type, but as it is 3rd party code it might not be

-        return result
+                if not isinstance(result, tuple) or len(result) != 2:
+                    logger.warning(
+                        "Wrong type returned by module API callback %s: %s, expected"
+                        " Optional[Tuple[str, Optional[Callable]]]",
+                        callback,
+                        result,
+                    )
+                    continue

+                # pull out the two parts of the tuple so we can do type checking
+                str_result, callback_result = result

+                # the 1st item in the tuple should be a str
+                if not isinstance(str_result, str):
+                    logger.warning(  # type: ignore[unreachable]
+                        "Wrong type returned by module API callback %s: %s, expected"
+                        " Optional[Tuple[str, Optional[Callable]]]",
+                        callback,
+                        result,
+                    )
+                    continue

+                # the second should be Optional[Callable]
+                if callback_result is not None:
+                    if not callable(callback_result):
+                        logger.warning(  # type: ignore[unreachable]
+                            "Wrong type returned by module API callback %s: %s, expected"
+                            " Optional[Tuple[str, Optional[Callable]]]",
+                            callback,
+                            result,
+                        )
+                        continue

+                # The result is a (str, Optional[callback]) tuple so return the successful result
+                return result

+        # If this point has been reached then none of the callbacks successfully authenticated
+        # the user so return None
+        return None
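The type checks above reduce to this predicate, restated standalone for clarity (not part of the diff):

```python
def is_valid_module_result(result: object) -> bool:
    """True iff result looks like Tuple[str, Optional[Callable]]."""
    if not isinstance(result, tuple) or len(result) != 2:
        return False
    user_id, post_login_callback = result
    if not isinstance(user_id, str):
        return False
    return post_login_callback is None or callable(post_login_callback)

assert is_valid_module_result(("@alice:example.org", None))
assert not is_valid_module_result("@alice:example.org")  # bare str: legacy shim only
assert not is_valid_module_result(("@alice:example.org", "not-callable"))
```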
     async def check_3pid_auth(
         self, medium: str, address: str, password: str
-    ) -> Optional[Tuple[str, Optional[Callable]]]:
-        g = getattr(self._pp, "check_3pid_auth", None)
-        if not g:
-            return None
+    ) -> Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]]:

-        # This function is able to return a deferred that either
-        # resolves None, meaning authentication failure, or upon
-        # success, to a str (which is the user_id) or a tuple of
-        # (user_id, callback_func), where callback_func should be run
-        # after we've finished everything else
-        result = await g(medium, address, password)

-        # Check if the return value is a str or a tuple
-        if isinstance(result, str):
-            # If it's a str, set callback function to None
-            return result, None
+        for callback in self.check_3pid_auth_callbacks:
+            try:
+                result = await callback(medium, address, password)
+            except Exception as e:
+                logger.warning("Failed to run module API callback %s: %s", callback, e)
+                continue

-        return result
+            if result is not None:
+                # Check that the callback returned a Tuple[str, Optional[Callable]]
+                # "type: ignore[unreachable]" is used after some isinstance checks because mypy thinks
+                # result is always the right type, but as it is 3rd party code it might not be

+                if not isinstance(result, tuple) or len(result) != 2:
+                    logger.warning(
+                        "Wrong type returned by module API callback %s: %s, expected"
+                        " Optional[Tuple[str, Optional[Callable]]]",
+                        callback,
+                        result,
+                    )
+                    continue

+                # pull out the two parts of the tuple so we can do type checking
+                str_result, callback_result = result

+                # the 1st item in the tuple should be a str
+                if not isinstance(str_result, str):
+                    logger.warning(  # type: ignore[unreachable]
+                        "Wrong type returned by module API callback %s: %s, expected"
+                        " Optional[Tuple[str, Optional[Callable]]]",
+                        callback,
+                        result,
+                    )
+                    continue

+                # the second should be Optional[Callable]
+                if callback_result is not None:
+                    if not callable(callback_result):
+                        logger.warning(  # type: ignore[unreachable]
+                            "Wrong type returned by module API callback %s: %s, expected"
+                            " Optional[Tuple[str, Optional[Callable]]]",
+                            callback,
+                            result,
+                        )
+                        continue

+                # The result is a (str, Optional[callback]) tuple so return the successful result
+                return result

+        # If this point has been reached then none of the callbacks successfully authenticated
+        # the user so return None
+        return None
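And a matching sketch for `CHECK_3PID_AUTH_CALLBACK` (address and user id invented):

```python
from typing import Callable, Optional, Tuple

async def my_check_3pid_auth(
    medium: str, address: str, password: str
) -> Optional[Tuple[str, Optional[Callable]]]:
    # medium is e.g. "email"; return the matching MXID on success, None otherwise.
    if medium == "email" and address == "alice@example.org" and password == "hunter2":
        return "@alice:example.org", None
    return None
```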
     async def on_logged_out(
         self, user_id: str, device_id: Optional[str], access_token: str
     ) -> None:
-        g = getattr(self._pp, "on_logged_out", None)
-        if not g:
-            return

-        # This might return an awaitable, if it does block the log out
-        # until it completes.
-        await maybe_awaitable(
-            g(
-                user_id=user_id,
-                device_id=device_id,
-                access_token=access_token,
-            )
-        )
+        # call all of the on_logged_out callbacks
+        for callback in self.on_logged_out_callbacks:
+            try:
+                await callback(user_id, device_id, access_token)
+            except Exception as e:
+                logger.warning("Failed to run module API callback %s: %s", callback, e)
+                continue
@@ -19,19 +19,17 @@ from synapse.api.errors import SynapseError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import Requester, UserID, create_requester

-from ._base import BaseHandler
-
 if TYPE_CHECKING:
     from synapse.server import HomeServer

 logger = logging.getLogger(__name__)


-class DeactivateAccountHandler(BaseHandler):
+class DeactivateAccountHandler:
     """Handler which deals with deactivating user accounts."""

     def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
+        self.store = hs.get_datastore()
+        self.hs = hs
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
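The same mechanical pattern recurs in the handler hunks that follow: instead of inheriting shared attributes from `BaseHandler`, each handler now pulls its dependencies from the `HomeServer` directly. Schematically (a sketch, not code from this PR):

```python
class SomeHandler:  # previously: class SomeHandler(BaseHandler)
    def __init__(self, hs: "HomeServer"):
        # attributes that BaseHandler used to provide are now set explicitly
        self.hs = hs
        self.store = hs.get_datastore()
        self.clock = hs.get_clock()
        self.server_name = hs.hostname
```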
@@ -14,7 +14,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Collection,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Set,
+    Tuple,
+)

 from synapse.api import errors
 from synapse.api.constants import EventTypes
@@ -40,8 +51,6 @@ from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.metrics import measure_func
 from synapse.util.retryutils import NotRetryingDestination

-from ._base import BaseHandler
-
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -50,14 +59,16 @@ logger = logging.getLogger(__name__)
 MAX_DEVICE_DISPLAY_NAME_LEN = 100


-class DeviceWorkerHandler(BaseHandler):
+class DeviceWorkerHandler:
     def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
+        self.clock = hs.get_clock()
         self.hs = hs
+        self.store = hs.get_datastore()
+        self.notifier = hs.get_notifier()
         self.state = hs.get_state_handler()
         self.state_store = hs.get_storage().state
         self._auth_handler = hs.get_auth_handler()
+        self.server_name = hs.hostname

     @trace
     async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
@@ -443,6 +454,10 @@ class DeviceHandler(DeviceWorkerHandler):
     ) -> None:
         """Notify that a user's device(s) has changed. Pokes the notifier, and
         remote servers if the user is local.
+
+        Args:
+            user_id: The Matrix ID of the user whose device list has been updated.
+            device_ids: The device IDs that have changed.
         """
         if not device_ids:
             # No changes to notify about, so this is a no-op.
@@ -595,7 +610,7 @@ class DeviceHandler(DeviceWorkerHandler):
 def _update_device_from_client_ips(
-    device: JsonDict, client_ips: Dict[Tuple[str, str], JsonDict]
+    device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
 ) -> None:
     ip = client_ips.get((device["user_id"], device["device_id"]), {})
     device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
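The switch from `Dict` to `Mapping` in the signature above is the usual read-only-parameter idiom: `Mapping` has no mutator methods and is covariant in its value type, so callers may pass more precisely typed dicts without casts. A small illustration (names and values invented):

```python
from typing import Any, Dict, Mapping, Tuple

def read_last_seen(client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]) -> Any:
    # only reads from the mapping; never mutates it
    return client_ips.get(("@alice:example.org", "DEVICE1"), {}).get("last_seen")

ips: Dict[Tuple[str, str], Dict[str, int]] = {
    ("@alice:example.org", "DEVICE1"): {"last_seen": 1_600_000_000}
}
assert read_last_seen(ips) == 1_600_000_000  # Dict[..., Dict[str, int]] is accepted
```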
@@ -31,26 +31,25 @@ from synapse.appservice import ApplicationService
 from synapse.storage.databases.main.directory import RoomAliasMapping
 from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id

-from ._base import BaseHandler
-
 if TYPE_CHECKING:
     from synapse.server import HomeServer

 logger = logging.getLogger(__name__)


-class DirectoryHandler(BaseHandler):
+class DirectoryHandler:
     def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
+        self.auth = hs.get_auth()
+        self.hs = hs
+        self.state = hs.get_state_handler()
         self.appservice_handler = hs.get_application_service_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.store = hs.get_datastore()
         self.config = hs.config
         self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
-        self.require_membership = hs.config.require_membership_for_aliases
+        self.require_membership = hs.config.server.require_membership_for_aliases
         self.third_party_event_rules = hs.get_third_party_event_rules()
+        self.server_name = hs.hostname

         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(
@@ -146,7 +145,7 @@ class DirectoryHandler(BaseHandler):
         if not self.config.roomdirectory.is_alias_creation_allowed(
             user_id, room_id, room_alias_str
         ):
-            # Lets just return a generic message, as there may be all sorts of
+            # Let's just return a generic message, as there may be all sorts of
             # reasons why we said no. TODO: Allow configurable error messages
             # per alias creation rule?
             raise SynapseError(403, "Not allowed to create alias")
@@ -462,7 +461,7 @@ class DirectoryHandler(BaseHandler):
         if not self.config.roomdirectory.is_publishing_room_allowed(
             user_id, room_id, room_aliases
         ):
-            # Lets just return a generic message, as there may be all sorts of
+            # Let's just return a generic message, as there may be all sorts of
             # reasons why we said no. TODO: Allow configurable error messages
             # per alias creation rule?
             raise SynapseError(403, "Not allowed to publish room")
@@ -22,7 +22,8 @@ from synapse.api.constants import (
     RestrictedJoinRuleTypes,
 )
 from synapse.api.errors import AuthError, Codes, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.api.room_versions import RoomVersion
+from synapse.event_auth import check_auth_rules_for_event
 from synapse.events import EventBase
 from synapse.events.builder import EventBuilder
 from synapse.events.snapshot import EventContext
@@ -45,21 +46,16 @@ class EventAuthHandler:
         self._store = hs.get_datastore()
         self._server_name = hs.hostname

-    async def check_from_context(
+    async def check_auth_rules_from_context(
         self,
-        room_version: str,
+        room_version_obj: RoomVersion,
         event: EventBase,
         context: EventContext,
-        do_sig_check: bool = True,
     ) -> None:
+        """Check an event passes the auth rules at its own auth events"""
         auth_event_ids = event.auth_event_ids()
         auth_events_by_id = await self._store.get_events(auth_event_ids)
-        auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
-
-        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-        event_auth.check(
-            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
-        )
+        check_auth_rules_for_event(room_version_obj, event, auth_events_by_id.values())

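Call sites change accordingly: they now pass the event's `RoomVersion` object rather than a version string, and `do_sig_check` is gone from this path. A hedged sketch of a caller (the handler, event, and context variables are assumed):

```python
from synapse.api.room_versions import RoomVersions

async def example(event_auth_handler, event, context) -> None:
    # before: await event_auth_handler.check_from_context("6", event, context)
    # after: pass the RoomVersion object itself
    await event_auth_handler.check_auth_rules_from_context(
        RoomVersions.V6, event, context
    )
```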
     def compute_auth_events(
         self,
Some files were not shown because too many files have changed in this diff.