Compare commits: `erikj/remo...release-v1` (160 commits)
| SHA1 |
|---|
| b1bc26a909 |
| 8e15c92c2f |
| 557635f69a |
| 7d90d6ce9b |
| 7adcb20fc0 |
| 22a8838f62 |
| 057ce7b754 |
| 82eacb0e07 |
| daca7b2794 |
| c0df6bae06 |
| 316f89e87f |
| 387c297489 |
| 5f1198a67e |
| 3e831f24ff |
| e8ac9ac8ca |
| 21bd230831 |
| c5413d0e9e |
| 6a8643ff3d |
| 7958eadcd1 |
| 1c6a19002c |
| 64887f06fc |
| 551d2c3f4b |
| d983ced596 |
| 141b073c7b |
| 9c76d0561b |
| 5bba1b4905 |
| ac6bfcd52f |
| 4d6e5a5e99 |
| 206a7b5f12 |
| 9752849e2b |
| 653fe2f3cd |
| 13b0673b5a |
| 8dde0bf8b3 |
| afb6dcf806 |
| 41ac128fd3 |
| 6660912226 |
| 6482075c95 |
| 5090f26b63 |
| 52ed9655ed |
| ebdef256b3 |
| bd918d874f |
| 498084228b |
| c14f99be46 |
| 976216959b |
| d19bccdbec |
| 451f25172a |
| 91143bb24e |
| 47806b0869 |
| a683028d81 |
| 7562d887e1 |
| affaffb0ab |
| 63fb220e5f |
| 27c375f812 |
| f4833e0c06 |
| 28c6841102 |
| 652a6b094d |
| d1473f7362 |
| dc6366a9bd |
| 86fb71431c |
| b378d98c8f |
| 7967b36efe |
| 03318a766c |
| 2b2985b5cf |
| 51065c44bb |
| 6c84778549 |
| 765473567c |
| b65ecaff9b |
| 4df26abf28 |
| 25f43faa70 |
| 8771b1337d |
| eba431c539 |
| a8803e2b6e |
| ac88aca7f7 |
| 24f07a83e6 |
| 70f0ffd2fc |
| d783880083 |
| 37623e3382 |
| e2a443550e |
| ef889c98a6 |
| 1fb9a2d0bf |
| de8f0a03a3 |
| d0aee697ac |
| d5305000f1 |
| e9eb3549d3 |
| a61b13c0a1 |
| 0644ac0989 |
| e3bc4617fc |
| b85821aca2 |
| 56c4b47df3 |
| 4d624f467a |
| d11f2dfee5 |
| bb4b11846f |
| e9444cc74d |
| 0085dc5abc |
| 802560211a |
| e4ab8676b4 |
| 10a08ab88a |
| fa6679e794 |
| 8ba086980d |
| 391bfe9a7b |
| 787de3190f |
| 4e0fd35bc9 |
| dd2d32dcdb |
| fe604a022a |
| 1350b053da |
| 0ffa5fb935 |
| 3ff2251754 |
| 84936e2264 |
| 695b73c861 |
| 59d24c5bef |
| e83627926f |
| a15c003e5b |
| ceaa76970f |
| 9d25a0ae65 |
| d924827da1 |
| 3853a7edfc |
| 51a20914a8 |
| c1ddbbde4f |
| 177dae2704 |
| 69018acbd2 |
| 294c675033 |
| 3186324260 |
| 0f2629ebc6 |
| dac4445934 |
| 79e6d9e4b1 |
| ca380881b1 |
| 55159c48e3 |
| ca6ecb8d67 |
| 8798f2291c |
| 046175daba |
| 0c23aa393c |
| d9bd62f9d1 |
| 4b2217ace2 |
| a0972085ed |
| bdb4c20dc1 |
| acb8c81041 |
| 98a1b84631 |
| 026a66f2b3 |
| a745531c10 |
| 30c94862b4 |
| 5d281c10dd |
| 683d6f75af |
| eccacd72cb |
| b8c5f6fddb |
| 272402c4d7 |
| 05fa06834d |
| 913f790bb2 |
| 6982db9651 |
| 438a8594cb |
| e031c7e0cc |
| 0a88ec0a87 |
| b076bc276e |
| de0d088adc |
| db70435de7 |
| 495b214f4f |
| 71f0623de9 |
| e694a598f8 |
| 2b7dd21655 |
| c571736c6c |
| 601b893352 |
`.buildkite/postgres-config.yaml`:

````diff
@@ -3,7 +3,7 @@
 # CI's Docker setup at the point where this file is considered.
 server_name: "localhost:8800"
 
-signing_key_path: "/src/.buildkite/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
 
 report_stats: false
@@ -16,6 +16,4 @@ database:
     database: synapse
 
-# Suppress the key server warning.
-trusted_key_servers:
-  - server_name: "matrix.org"
-    suppress_key_server_warning: true
+trusted_key_servers: []
````
`.buildkite/scripts/create_postgres_db.py` (deleted):

````diff
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from synapse.storage.engines import create_engine
-
-logger = logging.getLogger("create_postgres_db")
-
-if __name__ == "__main__":
-    # Create a PostgresEngine.
-    db_engine = create_engine({"name": "psycopg2", "args": {}})
-
-    # Connect to postgres to create the base database.
-    # We use "postgres" as a database because it's bound to exist and the "synapse" one
-    # doesn't exist yet.
-    db_conn = db_engine.module.connect(
-        user="postgres", host="postgres", password="postgres", dbname="postgres"
-    )
-    db_conn.autocommit = True
-    cur = db_conn.cursor()
-    cur.execute("CREATE DATABASE synapse;")
-    cur.close()
-    db_conn.close()
````
`.buildkite/scripts/postgres_exec.py` (new executable file, 31 lines):

````diff
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+import psycopg2
+
+# a very simple replacement for `psql`, to make up for the lack of the postgres client
+# libraries in the synapse docker image.
+
+# We use "postgres" as a database because it's bound to exist and the "synapse" one
+# doesn't exist yet.
+db_conn = psycopg2.connect(
+    user="postgres", host="postgres", password="postgres", dbname="postgres"
+)
+db_conn.autocommit = True
+cur = db_conn.cursor()
+for c in sys.argv[1:]:
+    cur.execute(c)
````
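The script leaves `autocommit` enabled because `CREATE DATABASE` cannot run inside a transaction block, and psycopg2 implicitly opens one on the first `execute()`. A minimal sketch of the same pattern, using the CI connection values from the file above:

```python
import psycopg2

# Connect to the always-present "postgres" database; "synapse" does not exist yet.
db_conn = psycopg2.connect(
    user="postgres", host="postgres", password="postgres", dbname="postgres"
)
# Without autocommit, CREATE DATABASE fails inside the implicit transaction.
db_conn.autocommit = True

with db_conn.cursor() as cur:
    cur.execute("CREATE DATABASE synapse")
db_conn.close()
```

The test-script change below then drives this via `postgres_exec.py "CREATE DATABASE synapse"`.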
The `synapse_port_db` CI test script:

````diff
@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
 #
-# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
-# with additional dependencies needed for the test (such as coverage or the PostgreSQL
-# driver), update the schema of the test SQLite database and run background updates on it,
-# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to
-# test porting the SQLite database to the PostgreSQL database (with coverage).
+# Test script for 'synapse_port_db'.
+#   - sets up synapse and deps
+#   - runs the port script on a prepopulated test sqlite db
+#   - also runs it against a new sqlite db
+
 
 set -xe
 cd `dirname $0`/../..
@@ -22,15 +22,36 @@ echo "--- Generate the signing key"
 
 # Generate the server's signing key.
 python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
 
-echo "--- Prepare the databases"
+echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
 scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 
 # Create the PostgreSQL database.
-./.buildkite/scripts/create_postgres_db.py
+./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
 
-echo "+++ Run synapse_port_db"
-
-# Run the script
+echo "+++ Run synapse_port_db against test database"
 coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+
+# We should be able to run twice against the same database.
+echo "+++ Run synapse_port_db a second time"
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+
+#####
+
+# Now do the same again, on an empty database.
+
+echo "--- Prepare empty SQLite database"
+
+# we do this by deleting the sqlite db, and then doing the same again.
+rm .buildkite/test_db.db
+
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
+
+# re-create the PostgreSQL database.
+./.buildkite/scripts/postgres_exec.py \
+  "DROP DATABASE synapse" \
+  "CREATE DATABASE synapse"
+
+echo "+++ Run synapse_port_db against empty database"
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
````
`.buildkite/sqlite-config.yaml`:

````diff
@@ -3,7 +3,7 @@
 # schema and run background updates on it.
 server_name: "localhost:8800"
 
-signing_key_path: "/src/.buildkite/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
 
 report_stats: false
@@ -13,6 +13,4 @@ database:
   database: ".buildkite/test_db.db"
 
-# Suppress the key server warning.
-trusted_key_servers:
-  - server_name: "matrix.org"
-    suppress_key_server_warning: true
+trusted_key_servers: []
````
`.github/workflows/tests.yml` (vendored):

````diff
@@ -273,7 +273,7 @@ jobs:
         python-version: ${{ matrix.python-version }}
     - name: Patch Buildkite-specific test scripts
       run: |
-        sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py
+        sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
         sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
         sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
         sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
````
`CHANGES.md`:

````diff
@@ -1,11 +1,307 @@
-Synapse 1.32.0rc1 (2021-04-13)
+Synapse 1.35.0rc2 (2021-05-27)
 ==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. ([\#10079](https://github.com/matrix-org/synapse/issues/10079))
+
+
+Synapse 1.35.0rc1 (2021-05-25)
+==============================
+
+Features
+--------
+
+- Add experimental support to allow a user who could join a restricted room to view it in the spaces summary. ([\#9922](https://github.com/matrix-org/synapse/issues/9922), [\#10007](https://github.com/matrix-org/synapse/issues/10007), [\#10038](https://github.com/matrix-org/synapse/issues/10038))
+- Reduce memory usage when joining very large rooms over federation. ([\#9958](https://github.com/matrix-org/synapse/issues/9958))
+- Add a configuration option which allows enabling opentracing by user id. ([\#9978](https://github.com/matrix-org/synapse/issues/9978))
+- Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. ([\#10011](https://github.com/matrix-org/synapse/issues/10011))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. ([\#9991](https://github.com/matrix-org/synapse/issues/9991))
+- Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. ([\#9995](https://github.com/matrix-org/synapse/issues/9995))
+- Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. ([\#10002](https://github.com/matrix-org/synapse/issues/10002))
+- Fixed deletion of new presence stream states from database. ([\#10014](https://github.com/matrix-org/synapse/issues/10014), [\#10033](https://github.com/matrix-org/synapse/issues/10033))
+- Fixed a bug with very high resolution image uploads throwing internal server errors. ([\#10029](https://github.com/matrix-org/synapse/issues/10029))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. ([\#10045](https://github.com/matrix-org/synapse/issues/10045))
+
+
+Improved Documentation
+----------------------
+
+- Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. ([\#9803](https://github.com/matrix-org/synapse/issues/9803))
+- Clarify documentation around SSO mapping providers generating unique IDs and localparts. ([\#9980](https://github.com/matrix-org/synapse/issues/9980))
+- Updates to the PostgreSQL documentation (`postgres.md`). ([\#9988](https://github.com/matrix-org/synapse/issues/9988), [\#9989](https://github.com/matrix-org/synapse/issues/9989))
+- Fix broken link in user directory documentation. Contributed by @junquera. ([\#10016](https://github.com/matrix-org/synapse/issues/10016))
+- Add missing room state entry to the table of contents of room admin API. ([\#10043](https://github.com/matrix-org/synapse/issues/10043))
+
+
+Deprecations and Removals
+-------------------------
+
+- Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. ([\#9280](https://github.com/matrix-org/synapse/issues/9280))
+
+
+Internal Changes
+----------------
+
+- Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. ([\#9823](https://github.com/matrix-org/synapse/issues/9823))
+- Update comments in the space summary handler. ([\#9974](https://github.com/matrix-org/synapse/issues/9974))
+- Minor enhancements to the `@cachedList` descriptor. ([\#9975](https://github.com/matrix-org/synapse/issues/9975))
+- Split multipart email sending into a dedicated handler. ([\#9977](https://github.com/matrix-org/synapse/issues/9977))
+- Run `black` on files in the `scripts` directory. ([\#9981](https://github.com/matrix-org/synapse/issues/9981))
+- Add missing type hints to `synapse.util` module. ([\#9982](https://github.com/matrix-org/synapse/issues/9982))
+- Simplify a few helper functions. ([\#9984](https://github.com/matrix-org/synapse/issues/9984), [\#9985](https://github.com/matrix-org/synapse/issues/9985), [\#9986](https://github.com/matrix-org/synapse/issues/9986))
+- Remove unnecessary property from SQLBaseStore. ([\#9987](https://github.com/matrix-org/synapse/issues/9987))
+- Remove `keylen` param on `LruCache`. ([\#9993](https://github.com/matrix-org/synapse/issues/9993))
+- Update the Grafana dashboard in `contrib/`. ([\#10001](https://github.com/matrix-org/synapse/issues/10001))
+- Add a batching queue implementation. ([\#10017](https://github.com/matrix-org/synapse/issues/10017))
+- Reduce memory usage when verifying signatures on large numbers of events at once. ([\#10018](https://github.com/matrix-org/synapse/issues/10018))
+- Properly invalidate caches for destination retry timings (instead of expiring entries every 5 minutes). ([\#10036](https://github.com/matrix-org/synapse/issues/10036))
+- Fix running complement tests with Synapse workers. ([\#10039](https://github.com/matrix-org/synapse/issues/10039))
+- Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. ([\#10050](https://github.com/matrix-org/synapse/issues/10050))
+
+
+Synapse 1.34.0 (2021-05-17)
+===========================
+
+This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting.
+
+This release also deprecates the `POST /_synapse/admin/v1/rooms/<room_id>/delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/<room_id>` route instead.
+
+
+No significant changes since v1.34.0rc1.
+
+
+Synapse 1.34.0rc1 (2021-05-12)
+==============================
+
+Features
+--------
+
+- Add experimental option to track memory usage of the caches. ([\#9881](https://github.com/matrix-org/synapse/issues/9881))
+- Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
+- Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902))
+- Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951))
+- Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916))
+- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
+- Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935))
+- Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945))
+- Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. ([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. ([\#9895](https://github.com/matrix-org/synapse/issues/9895))
+- Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
+- Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. ([\#9910](https://github.com/matrix-org/synapse/issues/9910))
+- Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. ([\#9928](https://github.com/matrix-org/synapse/issues/9928))
+- Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. ([\#9930](https://github.com/matrix-org/synapse/issues/9930))
+- Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. ([\#9961](https://github.com/matrix-org/synapse/issues/9961), [\#9965](https://github.com/matrix-org/synapse/issues/9965))
+- Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. ([\#9968](https://github.com/matrix-org/synapse/issues/9968))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add `startup_delay` to docker healthcheck to reduce waiting time for coming online and update the documentation with extra options. Contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913))
+
+
+Improved Documentation
+----------------------
+
+- Add `port` argument to the Postgres database sample config section. ([\#9911](https://github.com/matrix-org/synapse/issues/9911))
+
+
+Deprecations and Removals
+-------------------------
+
+- Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
+
+
+Internal Changes
+----------------
+
+- Reduce the length of Synapse's access tokens. ([\#5588](https://github.com/matrix-org/synapse/issues/5588))
+- Export jemalloc stats to Prometheus if it is being used. ([\#9882](https://github.com/matrix-org/synapse/issues/9882))
+- Add type hints to presence handler. ([\#9885](https://github.com/matrix-org/synapse/issues/9885))
+- Reduce memory usage of the LRU caches. ([\#9886](https://github.com/matrix-org/synapse/issues/9886))
+- Add type hints to the `synapse.handlers` module. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
+- Time response time for external cache requests. ([\#9904](https://github.com/matrix-org/synapse/issues/9904))
+- Minor fixes to the `make_full_schema.sh` script. ([\#9931](https://github.com/matrix-org/synapse/issues/9931))
+- Move database schema files into a common directory. ([\#9932](https://github.com/matrix-org/synapse/issues/9932))
+- Add debug logging for lost/delayed to-device messages. ([\#9959](https://github.com/matrix-org/synapse/issues/9959))
+
+
+Synapse 1.33.2 (2021-05-11)
+===========================
+
+Due to the security issue highlighted below, server administrators are encouraged to update Synapse. We are not aware of these vulnerabilities being exploited in the wild.
+
+Security advisory
+-----------------
+
+This release fixes a denial of service attack ([CVE-2021-29471](https://github.com/matrix-org/synapse/security/advisories/GHSA-x345-32rc-8h85)) against Synapse's push rules implementation. Server admins are encouraged to upgrade.
+
+Internal Changes
+----------------
+
+- Unpin attrs dependency. ([\#9946](https://github.com/matrix-org/synapse/issues/9946))
+
+
+Synapse 1.33.1 (2021-05-06)
+===========================
+
+Bugfixes
+--------
+
+- Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. ([\#9937](https://github.com/matrix-org/synapse/issues/9937))
+
+
+Synapse 1.33.0 (2021-05-05)
+===========================
+
+Features
+--------
+
+- Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). ([\#9909](https://github.com/matrix-org/synapse/issues/9909))
+
+
+Synapse 1.33.0rc2 (2021-04-29)
+==============================
+
+Bugfixes
+--------
+
+- Fix tight loop when handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900))
+
+
+Synapse 1.33.0rc1 (2021-04-28)
+==============================
+
+Features
+--------
+
+- Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9800](https://github.com/matrix-org/synapse/issues/9800), [\#9814](https://github.com/matrix-org/synapse/issues/9814))
+- Add experimental support for handling presence on a worker. ([\#9819](https://github.com/matrix-org/synapse/issues/9819), [\#9820](https://github.com/matrix-org/synapse/issues/9820), [\#9828](https://github.com/matrix-org/synapse/issues/9828), [\#9850](https://github.com/matrix-org/synapse/issues/9850))
+- Return a new template when a user attempts to renew their account multiple times with the same token, stating that their account is set to expire. This replaces the invalid token template that would previously be shown in this case. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832))
+
+
+Bugfixes
+--------
+
+- Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. ([\#9726](https://github.com/matrix-org/synapse/issues/9726))
+- Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. ([\#9788](https://github.com/matrix-org/synapse/issues/9788))
+- Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. ([\#9802](https://github.com/matrix-org/synapse/issues/9802))
+- Limit the size of HTTP responses read over federation. ([\#9833](https://github.com/matrix-org/synapse/issues/9833))
+- Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. ([\#9867](https://github.com/matrix-org/synapse/issues/9867))
+- Fix a long-standing bug where errors from federation did not propagate to the client. ([\#9868](https://github.com/matrix-org/synapse/issues/9868))
+
+
+Improved Documentation
+----------------------
+
+- Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. ([\#9801](https://github.com/matrix-org/synapse/issues/9801))
+
+
+Internal Changes
+----------------
+
+- Add a dockerfile for running Synapse in worker-mode under Complement. ([\#9162](https://github.com/matrix-org/synapse/issues/9162))
+- Apply `pyupgrade` across the codebase. ([\#9786](https://github.com/matrix-org/synapse/issues/9786))
+- Move some replication processing out of `generic_worker`. ([\#9796](https://github.com/matrix-org/synapse/issues/9796))
+- Replace `HomeServer.get_config()` with inline references. ([\#9815](https://github.com/matrix-org/synapse/issues/9815))
+- Rename some handlers and config modules to not duplicate the top-level module. ([\#9816](https://github.com/matrix-org/synapse/issues/9816))
+- Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. ([\#9817](https://github.com/matrix-org/synapse/issues/9817))
+- Reduce CPU usage of the user directory by reusing existing calculated room membership. ([\#9821](https://github.com/matrix-org/synapse/issues/9821))
+- Small speed up for joining large remote rooms. ([\#9825](https://github.com/matrix-org/synapse/issues/9825))
+- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9838](https://github.com/matrix-org/synapse/issues/9838))
+- Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. ([\#9845](https://github.com/matrix-org/synapse/issues/9845))
+- Limit length of accepted email addresses. ([\#9855](https://github.com/matrix-org/synapse/issues/9855))
+- Remove redundant `synapse.types.Collection` type definition. ([\#9856](https://github.com/matrix-org/synapse/issues/9856))
+- Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. ([\#9858](https://github.com/matrix-org/synapse/issues/9858))
+- Disable invite rate-limiting by default when running the unit tests. ([\#9871](https://github.com/matrix-org/synapse/issues/9871))
+- Pass a reactor into `SynapseSite` to make testing easier. ([\#9874](https://github.com/matrix-org/synapse/issues/9874))
+- Make `DomainSpecificString` an `attrs` class. ([\#9875](https://github.com/matrix-org/synapse/issues/9875))
+- Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. ([\#9876](https://github.com/matrix-org/synapse/issues/9876))
+- Remove redundant `_PushHTTPChannel` test class. ([\#9878](https://github.com/matrix-org/synapse/issues/9878))
+- Remove backwards-compatibility code for Python versions < 3.6. ([\#9879](https://github.com/matrix-org/synapse/issues/9879))
+- Small performance improvement around handling new local presence updates. ([\#9887](https://github.com/matrix-org/synapse/issues/9887))
+
+
+Synapse 1.32.2 (2021-04-22)
+===========================
+
+This release includes a fix for a regression introduced in 1.32.0.
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.32.0 and 1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857))
+
+
+Synapse 1.32.1 (2021-04-21)
+===========================
+
+This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853)
+in Synapse 1.32.0 that caused connected Prometheus instances to become unstable.
+
+However, as this release is still subject to the `LoggingContext` change in 1.32.0,
+it is recommended to remain on or downgrade to 1.31.0.
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.32.0 which caused Synapse to report large numbers of Prometheus time series, potentially overwhelming Prometheus instances. ([\#9854](https://github.com/matrix-org/synapse/issues/9854))
+
+
+Synapse 1.32.0 (2021-04-20)
+===========================
+
+**Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853)
+that can overwhelm connected Prometheus instances. This issue was not present in
+1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and
+follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183)
+to clean up any excess writeahead logs.
+
+**Note:** This release also mistakenly included a change that may have affected Synapse
+modules that import `synapse.logging.context.LoggingContext`, such as
+[synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider).
+This will be fixed in a later Synapse version.
+
 **Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+.
 
 This release removes the deprecated `GET /_synapse/admin/v1/users/<user_id>` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities.
 
-This release requires Application Services to use type `m.login.application_services` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
+This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
+
+If you are using the `packages.matrix.org` Debian repository for Synapse packages,
+note that we have recently updated the expiry date on the gpg signing key. If you see an
+error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you
+will need to get a fresh copy of the keys. You can do so with:
+
+```sh
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+```
+
+Bugfixes
+--------
+
+- Fix the log lines of nested logging contexts. Broke in 1.32.0rc1. ([\#9829](https://github.com/matrix-org/synapse/issues/9829))
+
+
+Synapse 1.32.0rc1 (2021-04-13)
+==============================
+
 Features
 --------
````
`UPGRADE.rst`:

````diff
@@ -85,9 +85,78 @@ for example:
 
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.34.0
+====================
+
+``room_invite_state_types`` configuration setting
+-------------------------------------------------
+
+The ``room_invite_state_types`` configuration setting has been deprecated and
+replaced with ``room_prejoin_state``. See the `sample configuration file <https://github.com/matrix-org/synapse/blob/v1.34.0/docs/sample_config.yaml#L1515>`_.
+
+If you have set ``room_invite_state_types`` to the default value you should simply
+remove it from your configuration file. The default value used to be:
+
+.. code:: yaml
+
+   room_invite_state_types:
+      - "m.room.join_rules"
+      - "m.room.canonical_alias"
+      - "m.room.avatar"
+      - "m.room.encryption"
+      - "m.room.name"
+
+If you have customised this value, you should remove ``room_invite_state_types`` and
+configure ``room_prejoin_state`` instead.
+
+
+Upgrading to v1.33.0
+====================
+
+Account Validity HTML templates can now display a user's expiration date
+------------------------------------------------------------------------
+
+This may affect you if you have enabled the account validity feature, and have made use of a
+custom HTML template specified by the ``account_validity.template_dir`` or ``account_validity.account_renewed_html_path``
+Synapse config options.
+
+The template can now accept an ``expiration_ts`` variable, which represents the unix timestamp in milliseconds for the
+future date of which their account has been renewed until. See the
+`default template <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_renewed.html>`_
+for an example of usage.
+
+Also note that a new HTML template, ``account_previously_renewed.html``, has been added. This is shown to users
+when they attempt to renew their account with a valid renewal token that has already been used before. The default
+template contents can be found
+`here <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_previously_renewed.html>`_,
+and can also accept an ``expiration_ts`` variable. This template replaces the error message users would previously see
+upon attempting to use a valid renewal token more than once.
+
+
+Upgrading to v1.32.0
+====================
+
+Regression causing connected Prometheus instances to become overwhelmed
+-----------------------------------------------------------------------
+
+This release introduces `a regression <https://github.com/matrix-org/synapse/issues/9853>`_
+that can overwhelm connected Prometheus instances. This issue is not present in
+Synapse v1.32.0rc1.
+
+If you have been affected, please downgrade to 1.31.0. You then may need to
+remove excess writeahead logs in order for Prometheus to recover. Instructions
+for doing so are provided
+`here <https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183>`_.
+
+Dropping support for old Python, Postgres and SQLite versions
+-------------------------------------------------------------
+
+In line with our `deprecation policy <https://github.com/matrix-org/synapse/blob/release-v1.32.0/docs/deprecation_policy.md>`_,
+we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream.
+
+This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+.
+
 Removal of old List Accounts Admin API
 --------------------------------------
@@ -98,6 +167,16 @@ has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``G
 
 The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
 
+Application Services must use type ``m.login.application_service`` when registering users
+-----------------------------------------------------------------------------------------
+
+In compliance with the
+`Application Service spec <https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions>`_,
+Application Services are now required to use the ``m.login.application_service`` type when registering users via the
+``/_matrix/client/r0/register`` endpoint. This behaviour was deprecated in Synapse v1.30.0.
+
+Please ensure your Application Services are up to date.
+
 Upgrading to v1.29.0
 ====================
````
Nine one-line `changelog.d` entries were deleted:

````diff
@@ -1 +0,0 @@
-Add a dockerfile for running Synapse in worker-mode under Complement.
@@ -1 +0,0 @@
-Speed up federation transmission by using fewer database calls. Contributed by @ShadowJonathan.
@@ -1 +0,0 @@
-Apply `pyupgrade` across the codebase.
@@ -1 +0,0 @@
-Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg.
@@ -1 +0,0 @@
-Move some replication processing out of `generic_worker`.
@@ -1 +0,0 @@
-Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership.
@@ -1 +0,0 @@
-Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms.
@@ -1 +0,0 @@
-Replace `HomeServer.get_config()` with inline references.
@@ -1 +0,0 @@
-Add experimental support for handling presence on a worker.
````
````diff
@@ -224,16 +224,14 @@ class HomeServer(ReplicationHandler):
         destinations = yield self.get_servers_for_context(room_name)
 
         try:
-            yield self.replication_layer.send_pdus(
-                [
-                    Pdu.create_new(
-                        context=room_name,
-                        pdu_type="sy.room.message",
-                        content={"sender": sender, "body": body},
-                        origin=self.server_name,
-                        destinations=destinations,
-                    )
-                ]
+            yield self.replication_layer.send_pdu(
+                Pdu.create_new(
+                    context=room_name,
+                    pdu_type="sy.room.message",
+                    content={"sender": sender, "body": body},
+                    origin=self.server_name,
+                    destinations=destinations,
+                )
             )
         except Exception as e:
             logger.exception(e)
@@ -255,7 +253,7 @@ class HomeServer(ReplicationHandler):
                 origin=self.server_name,
                 destinations=destinations,
             )
-            yield self.replication_layer.send_pdus([pdu])
+            yield self.replication_layer.send_pdu(pdu)
         except Exception as e:
             logger.exception(e)
@@ -267,18 +265,16 @@ class HomeServer(ReplicationHandler):
         destinations = yield self.get_servers_for_context(room_name)
 
         try:
-            yield self.replication_layer.send_pdus(
-                [
-                    Pdu.create_new(
-                        context=room_name,
-                        is_state=True,
-                        pdu_type="sy.room.member",
-                        state_key=invitee,
-                        content={"membership": "invite"},
-                        origin=self.server_name,
-                        destinations=destinations,
-                    )
-                ]
+            yield self.replication_layer.send_pdu(
+                Pdu.create_new(
+                    context=room_name,
+                    is_state=True,
+                    pdu_type="sy.room.member",
+                    state_key=invitee,
+                    content={"membership": "invite"},
+                    origin=self.server_name,
+                    destinations=destinations,
+                )
             )
         except Exception as e:
             logger.exception(e)
````
(One file diff is suppressed because it is too large.)
`contrib/systemd/override-hardened.conf` (new file, 71 lines):

````diff
@@ -0,0 +1,71 @@
+[Service]
+# The following directives give the synapse service R/W access to:
+# - /run/matrix-synapse
+# - /var/lib/matrix-synapse
+# - /var/log/matrix-synapse
+
+RuntimeDirectory=matrix-synapse
+StateDirectory=matrix-synapse
+LogsDirectory=matrix-synapse
+
+######################
+## Security Sandbox ##
+######################
+
+# Make sure that the service has its own unshared tmpfs at /tmp and that it
+# cannot see or change any real devices
+PrivateTmp=true
+PrivateDevices=true
+
+# We give no capabilities to a service by default
+CapabilityBoundingSet=
+AmbientCapabilities=
+
+# Protect the following from modification:
+# - The entire filesystem
+# - sysctl settings and loaded kernel modules
+# - No modifications allowed to Control Groups
+# - Hostname
+# - System Clock
+ProtectSystem=strict
+ProtectKernelTunables=true
+ProtectKernelModules=true
+ProtectControlGroups=true
+ProtectClock=true
+ProtectHostname=true
+
+# Prevent access to the following:
+# - /home directory
+# - Kernel logs
+ProtectHome=tmpfs
+ProtectKernelLogs=true
+
+# Make sure that the process can only see PIDs and process details of itself,
+# and the second option disables seeing details of things like system load and
+# I/O etc
+ProtectProc=invisible
+ProcSubset=pid
+
+# While not needed, we set these options explicitly
+# - This process has been given access to the host network
+# - It can also communicate with any IP Address
+PrivateNetwork=false
+RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
+IPAddressAllow=any
+
+# Restrict system calls to a sane bunch
+SystemCallArchitectures=native
+SystemCallFilter=@system-service
+SystemCallFilter=~@privileged @resources @obsolete
+
+# Misc restrictions
+# - Since the process is a python process it needs to be able to write and
+#   execute memory regions, so we set MemoryDenyWriteExecute to false
+RestrictSUIDSGID=true
+RemoveIPC=true
+NoNewPrivileges=true
+RestrictRealtime=true
+RestrictNamespaces=true
+LockPersonality=true
+PrivateUsers=true
+MemoryDenyWriteExecute=false
````
`debian/changelog` (vendored):

````diff
@@ -1,8 +1,48 @@
-matrix-synapse-py3 (1.31.0+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.34.0) stable; urgency=medium
+
+  * New synapse release 1.34.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 May 2021 11:34:18 +0100
+
+matrix-synapse-py3 (1.33.2) stable; urgency=medium
+
+  * New synapse release 1.33.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 May 2021 11:17:59 +0100
+
+matrix-synapse-py3 (1.33.1) stable; urgency=medium
+
+  * New synapse release 1.33.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 06 May 2021 14:06:33 +0100
+
+matrix-synapse-py3 (1.33.0) stable; urgency=medium
+
+  * New synapse release 1.33.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 05 May 2021 14:15:27 +0100
+
+matrix-synapse-py3 (1.32.2) stable; urgency=medium
+
+  * New synapse release 1.32.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 22 Apr 2021 12:43:52 +0100
+
+matrix-synapse-py3 (1.32.1) stable; urgency=medium
+
+  * New synapse release 1.32.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 21 Apr 2021 14:00:55 +0100
+
+matrix-synapse-py3 (1.32.0) stable; urgency=medium
 
   [ Dan Callahan ]
   * Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
 
- -- Dan Callahan <danc@element.io>  Mon, 12 Apr 2021 13:07:36 +0000
+  [ Synapse Packaging team ]
+  * New synapse release 1.32.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Apr 2021 14:28:39 +0100
 
 matrix-synapse-py3 (1.31.0) stable; urgency=medium
````
The demo scripts' `--no-rate-limit` handling:

````diff
@@ -96,18 +96,48 @@ for port in 8080 8081 8082; do
 	# Check script parameters
 	if [ $# -eq 1 ]; then
 		if [ $1 = "--no-rate-limit" ]; then
-			# messages rate limit
-			echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config
-			echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config
-
-			# registration rate limit
-			printf 'rc_registration:\n  per_second: 1000\n  burst_count: 1000\n' >> $DIR/etc/$port.config
-
-			# login rate limit
-			echo 'rc_login:' >> $DIR/etc/$port.config
-			printf '  address:\n    per_second: 1000\n    burst_count: 1000\n' >> $DIR/etc/$port.config
-			printf '  account:\n    per_second: 1000\n    burst_count: 1000\n' >> $DIR/etc/$port.config
-			printf '  failed_attempts:\n    per_second: 1000\n    burst_count: 1000\n' >> $DIR/etc/$port.config
+
+			# Disable any rate limiting
+			ratelimiting=$(cat <<-RC
+				rc_message:
+				  per_second: 1000
+				  burst_count: 1000
+				rc_registration:
+				  per_second: 1000
+				  burst_count: 1000
+				rc_login:
+				  address:
+				    per_second: 1000
+				    burst_count: 1000
+				  account:
+				    per_second: 1000
+				    burst_count: 1000
+				  failed_attempts:
+				    per_second: 1000
+				    burst_count: 1000
+				rc_admin_redaction:
+				  per_second: 1000
+				  burst_count: 1000
+				rc_joins:
+				  local:
+				    per_second: 1000
+				    burst_count: 1000
+				  remote:
+				    per_second: 1000
+				    burst_count: 1000
+				rc_3pid_validation:
+				  per_second: 1000
+				  burst_count: 1000
+				rc_invites:
+				  per_room:
+				    per_second: 1000
+				    burst_count: 1000
+				  per_user:
+				    per_second: 1000
+				    burst_count: 1000
+				RC
+			)
+			echo "${ratelimiting}" >> $DIR/etc/$port.config
 		fi
 	fi
````
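Because the heredoc is appended verbatim to each generated homeserver config, the block has to survive the `<<-RC` tab-stripping and still parse as YAML. A quick shape check, as a sketch assuming PyYAML is available (only an excerpt of the block is reproduced):

```python
import yaml  # PyYAML, an assumed dependency for this check only

ratelimiting = """\
rc_message:
  per_second: 1000
  burst_count: 1000
rc_invites:
  per_room:
    per_second: 1000
    burst_count: 1000
  per_user:
    per_second: 1000
    burst_count: 1000
"""

cfg = yaml.safe_load(ratelimiting)
assert cfg["rc_message"]["burst_count"] == 1000
assert cfg["rc_invites"]["per_user"]["per_second"] == 1000
```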
The Docker image healthcheck:

````diff
@@ -88,5 +88,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
 
 ENTRYPOINT ["/start.py"]
 
-HEALTHCHECK --interval=1m --timeout=5s \
+HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
     CMD curl -fSs http://localhost:8008/health || exit 1
````
The accompanying Docker healthcheck documentation:

````diff
@@ -191,6 +191,16 @@ whilst running the above `docker run` commands.
 ```
     --no-healthcheck
 ```
 
+## Disabling the healthcheck in docker-compose file
+
+If you wish to disable the healthcheck via docker-compose, append the following to your service configuration.
+
+```
+healthcheck:
+  disable: true
+```
+
 ## Setting custom healthcheck on docker run
 
 If you wish to point the healthcheck at a different port with docker command, add the following
@@ -202,14 +212,15 @@ If you wish to point the healthcheck at a different port with docker command, add the following
 ## Setting the healthcheck in docker-compose file
 
 You can add the following to set a custom healthcheck in a docker compose file.
-You will need version >2.1 for this to work.
+You will need docker-compose version >2.1 for this to work.
 
 ```
 healthcheck:
   test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
-  interval: 1m
-  timeout: 10s
+  interval: 15s
+  timeout: 5s
   retries: 3
+  start_period: 5s
 ```
 
 ## Using jemalloc
````
The Docker image's generated log configuration template:

````diff
@@ -9,10 +9,11 @@ formatters:
 {% endif %}
 
 handlers:
+{% if LOG_FILE_PATH %}
   file:
     class: logging.handlers.TimedRotatingFileHandler
     formatter: precise
-    filename: {{ LOG_FILE_PATH or "homeserver.log" }}
+    filename: {{ LOG_FILE_PATH }}
     when: "midnight"
     backupCount: 6  # Does not include the current log file.
    encoding: utf8
@@ -29,6 +30,7 @@ handlers:
     # be written to disk.
     capacity: 10
     flushLevel: 30  # Flush for WARNING logs as well
+{% endif %}
 
   console:
     class: logging.StreamHandler
````
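The effect of the new `{% if %}` guard: when no log file path is configured, the `file` handler is omitted from the rendered config entirely, instead of falling back to `homeserver.log` in the working directory — the source of the `Permission denied: '/homeserver.log'` failure noted in the changelog above. A reduced sketch of the behaviour, assuming Jinja2 renders the template:

```python
from jinja2 import Template

# A cut-down stand-in for the template above, not the full log config.
snippet = Template(
    "handlers:\n"
    "{% if LOG_FILE_PATH %}"
    "  file:\n"
    "    filename: {{ LOG_FILE_PATH }}\n"
    "{% endif %}"
    "  console:\n"
    "    class: logging.StreamHandler\n"
)

print(snippet.render(LOG_FILE_PATH=""))                      # file handler omitted
print(snippet.render(LOG_FILE_PATH="/data/homeserver.log"))  # file handler kept
```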
nginx config templates generated by the worker-mode Docker setup:

````diff
@@ -184,18 +184,18 @@ stderr_logfile_maxbytes=0
 """
 
 NGINX_LOCATION_CONFIG_BLOCK = """
-location ~* {endpoint} {
+location ~* {endpoint} {{
     proxy_pass {upstream};
     proxy_set_header X-Forwarded-For $remote_addr;
     proxy_set_header X-Forwarded-Proto $scheme;
     proxy_set_header Host $host;
-}
+}}
 """
 
 NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {
+upstream {upstream_worker_type} {{
 {body}
-}
+}}
 """
````
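The braces are doubled because these blocks are filled in with Python's `str.format`, which treats `{`/`}` as replacement-field delimiters; `{{` and `}}` are the escapes for literal braces. A minimal illustration (the variable names below are placeholders, not the generator's real ones):

```python
# Correctly escaped: literal braces doubled, placeholders left single.
template = "upstream {upstream_worker_type} {{\n{body}\n}}\n"
print(template.format(upstream_worker_type="workers",
                      body="    server localhost:8081;"))

# An unescaped literal brace, as in the old templates, raises at format time:
try:
    "upstream {name} {".format(name="workers")
except ValueError as err:
    print("unescaped brace:", err)  # Single '{' encountered in format string
```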
The room admin API documentation:

````diff
@@ -4,6 +4,7 @@
 * [Usage](#usage)
   - [Room Details API](#room-details-api)
   - [Room Members API](#room-members-api)
+  - [Room State API](#room-state-api)
   - [Delete Room API](#delete-room-api)
     * [Parameters](#parameters-1)
     * [Response](#response)
@@ -427,7 +428,7 @@ the new room. Users on other servers will be unaffected.
 The API is:
 
 ```
-POST /_synapse/admin/v1/rooms/<room_id>/delete
+DELETE /_synapse/admin/v1/rooms/<room_id>
 ```
 
 with a body of:
@@ -528,6 +529,15 @@ You will have to manually handle, if you so choose, the following:
 * Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
 * Removal of the Content Violation room if desired.
 
+## Deprecated endpoint
+
+The previous deprecated API will be removed in a future release; it was:
+
+```
+POST /_synapse/admin/v1/rooms/<room_id>/delete
+```
+
+It behaves the same way as the current endpoint, except for the path and the method.
+
 # Make Room Admin API
````
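For comparison, a sketch of calling the new route with the `requests` library (server name, token, and room ID are placeholders; `block` and `purge` are body parameters this API accepts):

```python
import requests

resp = requests.delete(
    "https://synapse.example.com/_synapse/admin/v1/rooms/!room:example.com",
    headers={"Authorization": "Bearer <admin-access-token>"},
    json={"block": True, "purge": True},
)
resp.raise_for_status()
print(resp.json())
```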
The OpenTracing documentation:

````diff
@@ -42,17 +42,17 @@ To receive OpenTracing spans, start up a Jaeger server. This can be done
 using docker like so:
 
 ```sh
-docker run -d --name jaeger
+docker run -d --name jaeger \
   -p 6831:6831/udp \
   -p 6832:6832/udp \
   -p 5778:5778 \
   -p 16686:16686 \
   -p 14268:14268 \
-  jaegertracing/all-in-one:1.13
+  jaegertracing/all-in-one:1
 ```
 
 Latest documentation is probably at
-<https://www.jaegertracing.io/docs/1.13/getting-started/>
+https://www.jaegertracing.io/docs/latest/getting-started.
 
 ## Enable OpenTracing in Synapse
@@ -62,7 +62,7 @@ as shown in the [sample config](./sample_config.yaml). For example:
 
 ```yaml
 opentracing:
-    tracer_enabled: true
+    enabled: true
     homeserver_whitelist:
       - "mytrustedhomeserver.org"
       - "*.myotherhomeservers.com"
@@ -90,4 +90,4 @@ to two problems, namely:
 ## Configuring Jaeger
 
 Sampling strategies can be set as in this document:
-<https://www.jaegertracing.io/docs/1.13/sampling/>
+<https://www.jaegertracing.io/docs/latest/sampling/>.
````
200
docs/postgres.md
200
docs/postgres.md
@@ -1,6 +1,6 @@
|
||||
# Using Postgres
|
||||
|
||||
Postgres version 9.5 or later is known to work.
|
||||
Synapse supports PostgreSQL versions 9.6 or later.
|
||||
|
||||
## Install postgres client libraries
|
||||
|
||||
@@ -33,28 +33,15 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
|
||||
# Or, if your system uses sudo to get administrative rights
|
||||
sudo -u postgres bash
|
||||
|
||||
Then, create a user ``synapse_user`` with:
|
||||
Then, create a postgres user and a database with:
|
||||
|
||||
# this will prompt for a password for the new user
|
||||
createuser --pwprompt synapse_user
|
||||
|
||||
Before you can authenticate with the `synapse_user`, you must create a
|
||||
database that it can access. To create a database, first connect to the
|
||||
database with your database user:
|
||||
createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
|
||||
|
||||
su - postgres # Or: sudo -u postgres bash
|
||||
psql
|
||||
|
||||
and then run:
|
||||
|
||||
CREATE DATABASE synapse
|
||||
ENCODING 'UTF8'
|
||||
LC_COLLATE='C'
|
||||
LC_CTYPE='C'
|
||||
template=template0
|
||||
OWNER synapse_user;
|
||||
|
||||
This would create an appropriate database named `synapse` owned by the
|
||||
`synapse_user` user (which must already have been created as above).
|
||||
The above will create a user called `synapse_user`, and a database called
|
||||
`synapse`.
|
||||
|
||||
Note that the PostgreSQL database *must* have the correct encoding set
|
||||
(as shown above), otherwise it will not be able to store UTF8 strings.
|
||||
@@ -63,79 +50,6 @@ You may need to enable password authentication so `synapse_user` can
|
||||
connect to the database. See
|
||||
<https://www.postgresql.org/docs/current/auth-pg-hba-conf.html>.
|
||||
|
||||
If you get an error along the lines of `FATAL: Ident authentication failed for
|
||||
user "synapse_user"`, you may need to use an authentication method other than
|
||||
`ident`:
|
||||
|
||||
* If the `synapse_user` user has a password, add the password to the `database:`
|
||||
section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
|
||||
|
||||
```
|
||||
host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that
|
||||
```
|
||||
|
||||
* If the `synapse_user` user does not have a password, then a password doesn't
|
||||
have to be added to `homeserver.yaml`. But the following does need to be added
|
||||
to `pg_hba.conf`:
|
||||
|
||||
```
|
||||
host synapse synapse_user ::1/128 trust
|
||||
```
|
||||
|
||||
Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
|
||||
new line, it is inserted before:
|
||||
|
||||
```
|
||||
host all all ::1/128 ident
|
||||
```

### Fixing incorrect `COLLATE` or `CTYPE`

Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.

The safest way to fix the issue is to take a dump and recreate the database with
the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database;
however, extreme care must be taken to avoid database corruption.

Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.


## Fixing inconsistent sequences error

Synapse uses Postgres sequences to generate IDs for various tables. A sequence
and associated table can get out of sync if, for example, Synapse has been
downgraded and then upgraded again.

To fix the issue, shut down Synapse (including any and all workers) and run the
SQL command included in the error message. Once done, Synapse should start
successfully.

## Tuning Postgres

The default settings should be fine for most deployments. For larger
scale deployments tuning some of the settings is recommended, details of
which can be found at
<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.

In particular, we've found tuning the following values helpful for
performance:

- `shared_buffers`
- `effective_cache_size`
- `work_mem`
- `maintenance_work_mem`
- `autovacuum_work_mem`

Note that the appropriate values for those fields depend on the amount
of free memory the database host has available.

## Synapse config

When you are ready to start using PostgreSQL, edit the `database`
@@ -165,18 +79,42 @@ may block for an extended period while it waits for a response from the
database server. Example values might be:

```yaml
database:
  args:
    # ... as above

    # seconds of inactivity after which TCP should send a keepalive message to the server
    keepalives_idle: 10

    # the number of seconds after which a TCP keepalive message that is not
    # acknowledged by the server should be retransmitted
    keepalives_interval: 10

    # the number of TCP keepalives that can be lost before the client's connection
    # to the server is considered dead
    keepalives_count: 3
```

## Tuning Postgres

The default settings should be fine for most deployments. For larger
scale deployments tuning some of the settings is recommended, details of
which can be found at
<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.

In particular, we've found tuning the following values helpful for
performance:

- `shared_buffers`
- `effective_cache_size`
- `work_mem`
- `maintenance_work_mem`
- `autovacuum_work_mem`

Note that the appropriate values for those fields depend on the amount
of free memory the database host has available.
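
As a purely illustrative sketch (not a recommendation — the right values depend
entirely on your hardware and workload, per the tuning guide above), a host with
roughly 8GB of memory to spare for Postgres might start from something like:

```
# postgresql.conf — illustrative starting points only
shared_buffers = 2GB
effective_cache_size = 6GB
work_mem = 32MB
maintenance_work_mem = 512MB
autovacuum_work_mem = 256MB
```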

## Porting from SQLite

### Overview

@@ -185,9 +123,8 @@ The script `synapse_port_db` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done as a two-phase
process:

1. Copy the existing SQLite database to a separate location (while the
   server is down) and running the port script against that offline
   database.
1. Copy the existing SQLite database to a separate location and run
   the port script against that offline database.
2. Shut down the server. Rerun the port script to port any data that
   has come in since taking the first snapshot. Restart the server against
   the PostgreSQL database.
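
Concretely, the two phases might look like the following sketch (the database
path is an example; `--sqlite-database` and `--postgres-config` are the port
script's flags, and `homeserver-postgres.yaml` is described below):

```sh
# Phase one: copy the DB and port the copy while Synapse keeps running
cp homeserver.db homeserver.db.snapshot
synapse_port_db --sqlite-database homeserver.db.snapshot \
    --postgres-config homeserver-postgres.yaml

# Phase two: stop Synapse, port the data added since the snapshot, restart
./synctl stop
synapse_port_db --sqlite-database homeserver.db \
    --postgres-config homeserver-postgres.yaml
./synctl start
```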
@@ -245,3 +182,60 @@ PostgreSQL database configuration file `homeserver-postgres.yaml`:
./synctl start

Synapse should now be running against PostgreSQL.


## Troubleshooting

### Alternative auth methods

If you get an error along the lines of `FATAL: Ident authentication failed for
user "synapse_user"`, you may need to use an authentication method other than
`ident`:

* If the `synapse_user` user has a password, add the password to the `database:`
  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:

  ```
  host    synapse     synapse_user    ::1/128     md5  # or `scram-sha-256` instead of `md5` if you use that
  ```

* If the `synapse_user` user does not have a password, then a password doesn't
  have to be added to `homeserver.yaml`. But the following does need to be added
  to `pg_hba.conf`:

  ```
  host    synapse     synapse_user    ::1/128     trust
  ```

Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
new line, it is inserted before:

```
host    all         all             ::1/128     ident
```
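
Putting the two rules together, a correctly ordered fragment would look like
this (illustrative only; a real `pg_hba.conf` will contain other entries):

```
host    synapse     synapse_user    ::1/128     md5     # more specific rule first
host    all         all             ::1/128     ident   # pre-existing catch-all
```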

### Fixing incorrect `COLLATE` or `CTYPE`

Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.

The safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database;
however, extreme care must be taken to avoid database corruption.

Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.

### Fixing inconsistent sequences error

Synapse uses Postgres sequences to generate IDs for various tables. A sequence
and associated table can get out of sync if, for example, Synapse has been
downgraded and then upgraded again.

To fix the issue, shut down Synapse (including any and all workers) and run the
SQL command included in the error message. Once done, Synapse should start
successfully.
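
For illustration only, the command in the error message generally has this
shape — the sequence, table, and column names below are hypothetical, so always
run the exact SQL that Synapse prints:

```sql
-- hypothetical example of realigning a sequence with its table
SELECT setval('some_stream_seq',
    (SELECT COALESCE(MAX(stream_id), 1) FROM some_stream_table));
```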

@@ -28,7 +28,11 @@ async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
which can be given a list of local or remote MXIDs to broadcast known, online user
presence to (for those users that the receiving user is considered interested in).
It does not include state for users who are currently offline, and it can only be
called on workers that support sending federation.
called on workers that support sending federation. Additionally, this method must
only be called from the process that has been configured to write to the
[presence stream](https://github.com/matrix-org/synapse/blob/master/docs/workers.md#stream-writers).
By default, this is the main process, but another worker can be configured to do
so.
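
For example, a module could broadcast presence for a couple of users like this
(a minimal sketch: `module_api` stands for the `ModuleApi` instance passed to
the module, and the MXIDs are placeholders):

```python
# Runs inside an async module callback, on the presence writer process.
users = ["@alice:example.org", "@bob:example.org"]
await module_api.send_local_online_presence_to(users)
```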

### Module structure


@@ -152,6 +152,16 @@ presence:
#
#gc_thresholds: [700, 10, 10]

# The minimum time in seconds between each GC for a generation, regardless of
# the GC thresholds. This ensures that we don't do GC too frequently.
#
# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
# generation 0 GCs, etc.
#
# Defaults to `[1s, 10s, 30s]`.
#
#gc_min_interval: [0.5s, 30s, 1m]

# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is 100. -1 means no upper limit.
#
@@ -673,33 +683,6 @@ acme:
#
account_key_file: DATADIR/acme_account.key

# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the loadbalancers to this list if they are different to the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# key. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]

## Federation ##

@@ -731,6 +714,12 @@ acme:
#
#allow_profile_lookup_over_federation: false

# Uncomment to disable device display name lookup over federation. By default, the
# Federation API allows other homeservers to obtain device display names of any user
# on this homeserver. Defaults to 'true'.
#
#allow_device_name_lookup_over_federation: false


## Caching ##

@@ -810,6 +799,7 @@ caches:
#    password: secretpassword
#    database: synapse
#    host: localhost
#    port: 5432
#    cp_min: 5
#    cp_max: 10
#
@@ -1175,69 +1165,6 @@ url_preview_accept_language:
#
#enable_registration: false

# Optional account validity configuration. This allows for accounts to be denied
# any request after a given period.
#
# Once this feature is enabled, Synapse will look for registered users without an
# expiration date at startup and will add one to every account it found using the
# current settings at that time.
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10% of the validity period.
#
account_validity:
  # The account validity feature is disabled by default. Uncomment the
  # following line to enable it.
  #
  #enabled: true

  # The period after which an account is valid after its registration. When
  # renewing the account, its validity period will be extended by this amount
  # of time. This parameter is required when using the account validity
  # feature.
  #
  #period: 6w

  # The amount of time before an account's expiry date at which Synapse will
  # send an email to the account's email address with a renewal link. By
  # default, no such emails are sent.
  #
  # If you enable this setting, you will also need to fill out the 'email' and
  # 'public_baseurl' configuration sections.
  #
  #renew_at: 1w

  # The subject of the email sent out with the renewal link. '%(app)s' can be
  # used as a placeholder for the 'app_name' parameter from the 'email'
  # section.
  #
  # Note that the placeholder must be written '%(app)s', including the
  # trailing 's'.
  #
  # If this is not set, a default value is used.
  #
  #renew_email_subject: "Renew your %(app)s account"

  # Directory in which Synapse will try to find templates for the HTML files to
  # serve to the user when trying to renew an account. If not set, default
  # templates from within the Synapse package will be used.
  #
  #template_dir: "res/templates"

  # File within 'template_dir' giving the HTML to be displayed to the user after
  # they successfully renewed their account. If not set, default text is used.
  #
  #account_renewed_html_path: "account_renewed.html"

  # File within 'template_dir' giving the HTML to be displayed when the user
  # tries to renew an account with an invalid renewal token. If not set,
  # default text is used.
  #
  #invalid_token_html_path: "invalid_token.html"

# Time that a user's session remains valid for, after they log in.
#
# Note that this is not currently compatible with guest logins.
@@ -1432,6 +1359,91 @@ account_threepid_delegates:
#auto_join_rooms_for_guests: false


## Account Validity ##

# Optional account validity configuration. This allows for accounts to be denied
# any request after a given period.
#
# Once this feature is enabled, Synapse will look for registered users without an
# expiration date at startup and will add one to every account it found using the
# current settings at that time.
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10% of the validity period.
#
account_validity:
  # The account validity feature is disabled by default. Uncomment the
  # following line to enable it.
  #
  #enabled: true

  # The period after which an account is valid after its registration. When
  # renewing the account, its validity period will be extended by this amount
  # of time. This parameter is required when using the account validity
  # feature.
  #
  #period: 6w

  # The amount of time before an account's expiry date at which Synapse will
  # send an email to the account's email address with a renewal link. By
  # default, no such emails are sent.
  #
  # If you enable this setting, you will also need to fill out the 'email' and
  # 'public_baseurl' configuration sections.
  #
  #renew_at: 1w

  # The subject of the email sent out with the renewal link. '%(app)s' can be
  # used as a placeholder for the 'app_name' parameter from the 'email'
  # section.
  #
  # Note that the placeholder must be written '%(app)s', including the
  # trailing 's'.
  #
  # If this is not set, a default value is used.
  #
  #renew_email_subject: "Renew your %(app)s account"

  # Directory in which Synapse will try to find templates for the HTML files to
  # serve to the user when trying to renew an account. If not set, default
  # templates from within the Synapse package will be used.
  #
  # The currently available templates are:
  #
  # * account_renewed.html: Displayed to the user after they have successfully
  #   renewed their account.
  #
  # * account_previously_renewed.html: Displayed to the user if they attempt to
  #   renew their account with a token that is valid, but that has already
  #   been used. In this case the account is not renewed again.
  #
  # * invalid_token.html: Displayed to the user when they try to renew an account
  #   with an unknown or invalid renewal token.
  #
  # See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for
  # default template contents.
  #
  # The file name of some of these templates can be configured below for legacy
  # reasons.
  #
  #template_dir: "res/templates"

  # A custom file name for the 'account_renewed.html' template.
  #
  # If not set, the file is assumed to be named "account_renewed.html".
  #
  #account_renewed_html_path: "account_renewed.html"

  # A custom file name for the 'invalid_token.html' template.
  #
  # If not set, the file is assumed to be named "invalid_token.html".
  #
  #invalid_token_html_path: "invalid_token.html"

## Metrics ###

# Enable collection and rendering of performance metrics
@@ -1482,6 +1494,7 @@ room_prejoin_state:
#   - m.room.avatar
#   - m.room.encryption
#   - m.room.name
#   - m.room.create
#
# Uncomment the following to disable these defaults (so that only the event
# types listed in 'additional_event_types' are shared). Defaults to 'false'.
@@ -1878,7 +1891,7 @@ saml2_config:
#   sub-properties:
#
#   module: The class name of a custom mapping module. Default is
#     'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
#     'synapse.handlers.oidc.JinjaOidcMappingProvider'.
#     See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
#     for information on implementing a custom mapping provider.
#
@@ -2805,7 +2818,8 @@ opentracing:
#enabled: true

# The list of homeservers we wish to send and receive span contexts and span baggage.
# See docs/opentracing.rst
# See docs/opentracing.rst.
#
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
@@ -2814,19 +2828,26 @@ opentracing:
#homeserver_whitelist:
#  - ".*"

# A list of the matrix IDs of users whose requests will always be traced,
# even if the tracing system would otherwise drop the traces due to
# probabilistic sampling.
#
# By default, the list is empty.
#
#force_tracing_for_users:
#  - "@user1:server_name"
#  - "@user2:server_name"

# Jaeger can be configured to sample traces at different rates.
# All configuration options provided by Jaeger can be set here.
# Jaeger's configuration mostly related to trace sampling which
# Jaeger's configuration is mostly related to trace sampling which
# is documented here:
# https://www.jaegertracing.io/docs/1.13/sampling/.
# https://www.jaegertracing.io/docs/latest/sampling/.
#
#jaeger_config:
#  sampler:
#    type: const
#    param: 1

# Logging whether spans were started and reported
#
# logging:
#   false

@@ -2895,3 +2916,18 @@ redis:
  # Optional password if configured on the Redis instance
  #
  #password: <secret_password>


# Enable experimental features in Synapse.
#
# Experimental features might break or be removed without a deprecation
# period.
#
experimental_features:
  # Support for Spaces (MSC1772); it enables the following:
  #
  # * The Spaces Summary API (MSC2946).
  # * Restricting room membership based on space membership (MSC3083).
  #
  # Uncomment to disable support for Spaces.
  #spaces_enabled: false

@@ -67,8 +67,8 @@ A custom mapping provider must specify the following methods:
- Arguments:
  - `userinfo` - An `authlib.oidc.core.claims.UserInfo` object to extract user
    information from.
- This method must return a string, which is the unique identifier for the
  user. Commonly the ``sub`` claim of the response.
- This method must return a string, which is the unique, immutable identifier
  for the user. Commonly the `sub` claim of the response.
* `map_user_attributes(self, userinfo, token, failures)`
  - This method must be async.
  - Arguments:
@@ -87,7 +87,9 @@ A custom mapping provider must specify the following methods:
    `localpart` value, such as `john.doe1`.
- Returns a dictionary with two keys:
  - `localpart`: A string, used to generate the Matrix ID. If this is
    `None`, the user is prompted to pick their own username.
    `None`, the user is prompted to pick their own username. This is only used
    during a user's first login. Once a localpart has been associated with a
    remote user ID (see `get_remote_user_id`) it cannot be updated.
  - `displayname`: An optional string, the display name for the user.
* `get_extra_attributes(self, userinfo, token)`
  - This method must be async.
@@ -106,7 +108,7 @@ A custom mapping provider must specify the following methods:

Synapse has a built-in OpenID mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`](../synapse/handlers/oidc_handler.py).
[`synapse.handlers.oidc.JinjaOidcMappingProvider`](../synapse/handlers/oidc.py).
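
To make the method list above concrete, here is a minimal sketch of a custom
OIDC mapping provider, assuming only the signatures described in this document
(the class name and the claim-handling choices are illustrative, not part of
Synapse's API):

```python
class MyOidcMappingProvider:
    def __init__(self, parsed_config):
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        # This sketch has no provider-specific options to validate.
        return config

    def get_remote_user_id(self, userinfo):
        # Use the stable, immutable `sub` claim as the remote user ID.
        return userinfo["sub"]

    async def map_user_attributes(self, userinfo, token, failures):
        # Derive a localpart, de-duplicating with the `failures` counter.
        localpart = userinfo.get("preferred_username")
        if localpart and failures:
            localpart += str(failures)
        return {"localpart": localpart, "displayname": userinfo.get("name")}

    async def get_extra_attributes(self, userinfo, token):
        # Nothing extra to hand back to clients in this sketch.
        return {}
```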

## SAML Mapping Providers

@@ -153,8 +155,8 @@ A custom mapping provider must specify the following methods:
    information from.
  - `client_redirect_url` - A string, the URL that the client will be
    redirected to.
- This method must return a string, which is the unique identifier for the
  user. Commonly the ``uid`` claim of the response.
- This method must return a string, which is the unique, immutable identifier
  for the user. Commonly the `uid` claim of the response.
* `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)`
  - Arguments:
    - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
@@ -172,8 +174,10 @@ A custom mapping provider must specify the following methods:
    redirected to.
  - This method must return a dictionary, which will then be used by Synapse
    to build a new user. The following keys are allowed:
    * `mxid_localpart` - The mxid localpart of the new user. If this is
      `None`, the user is prompted to pick their own username.
    * `mxid_localpart` - A string, the mxid localpart of the new user. If this is
      `None`, the user is prompted to pick their own username. This is only used
      during a user's first login. Once a localpart has been associated with a
      remote user ID (see `get_remote_user_id`) it cannot be updated.
    * `displayname` - The displayname of the new user. If not provided, will default to
      the value of `mxid_localpart`.
    * `emails` - A list of emails for the new user. If not provided, will
@@ -190,4 +194,4 @@ A custom mapping provider must specify the following methods:

Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
[`synapse.handlers.saml.DefaultSamlMappingProvider`](../synapse/handlers/saml.py).

@@ -65,3 +65,33 @@ systemctl restart matrix-synapse-worker@federation_reader.service
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.target
```

## Hardening

**Optional:** If further hardening is desired, the file
`override-hardened.conf` may be copied from
`contrib/systemd/override-hardened.conf` in this repository to the location
`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
directory may have to be created). It enables certain sandboxing features in
systemd to further secure the synapse service. You may read the comments to
understand what the override file is doing. The same file will need to be copied
to
`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
(this directory may also have to be created) in order to apply the same
hardening options to any worker processes.

Once these files have been copied to their appropriate locations, simply reload
systemd's manager config files and restart all Synapse services to apply the
hardening options. They will automatically be applied at every restart as long
as the override files are present at the specified locations.

```sh
systemctl daemon-reload

# Restart services
systemctl restart matrix-synapse.target
```

In order to see their effect, you may run `systemd-analyze security
matrix-synapse.service` before and after applying the hardening options to see
the changes being applied at a glance.

@@ -216,6 +216,10 @@ Asks the server for the current position of all streams.

This is used when a worker is shutting down.

#### FEDERATION_ACK (C)

Acknowledge receipt of some federation data

#### REMOTE_SERVER_UP (S, C)

Inform other processes that a remote server may have come back online.

@@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.

mypy.ini
@@ -41,7 +41,6 @@ files =
    synapse/push,
    synapse/replication,
    synapse/rest,
    synapse/secrets.py,
    synapse/server.py,
    synapse/server_notices,
    synapse/spam_checker_api,
@@ -72,8 +71,13 @@ files =
    synapse/types.py,
    synapse/util/async_helpers.py,
    synapse/util/caches,
    synapse/util/daemonize.py,
    synapse/util/hash.py,
    synapse/util/iterutils.py,
    synapse/util/metrics.py,
    synapse/util/macaroons.py,
    synapse/util/module_loader.py,
    synapse/util/msisdn.py,
    synapse/util/stringutils.py,
    synapse/visibility.py,
    tests/replication,
@@ -81,6 +85,7 @@ files =
    tests/handlers/test_password_providers.py,
    tests/rest/client/v1/test_login.py,
    tests/rest/client/v2_alpha/test_auth.py,
    tests/util/test_itertools.py,
    tests/util/test_stream_change_cache.py

[mypy-pymacaroons.*]
@@ -172,3 +177,12 @@ ignore_missing_imports = True

[mypy-txacme.*]
ignore_missing_imports = True

[mypy-pympler.*]
ignore_missing_imports = True

[mypy-phonenumbers.*]
ignore_missing_imports = True

[mypy-ijson.*]
ignore_missing_imports = True

@@ -21,17 +21,18 @@ DISTS = (
    "debian:buster",
    "debian:bullseye",
    "debian:sid",
    "ubuntu:bionic",
    "ubuntu:focal",
    "ubuntu:groovy",
    "ubuntu:bionic",  # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:groovy",  # 20.10 (EOL 2021-07-07)
    "ubuntu:hirsute",  # 21.04 (EOL 2022-01-05)
)

DESC = '''\
DESC = """\
Builds .debs for synapse, using a Docker image for the build environment.

By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
'''
"""


class Builder(object):
@@ -45,7 +46,7 @@ class Builder(object):
        """Build deb for a single distribution"""

        if self._failed:
            print("not building %s due to earlier failure" % (dist, ))
            print("not building %s due to earlier failure" % (dist,))
            raise Exception("failed")

        try:
@@ -67,48 +68,65 @@ class Builder(object):
        # we tend to get source packages which are full of debs. (We could hack
        # around that with more magic in the build_debian.sh script, but that
        # doesn't solve the problem for natively-run dpkg-buildpakage).
        debsdir = os.path.join(projdir, '../debs')
        debsdir = os.path.join(projdir, "../debs")
        os.makedirs(debsdir, exist_ok=True)

        if self.redirect_stdout:
            logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
            logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
            print("building %s: directing output to %s" % (dist, logfile))
            stdout = open(logfile, "w")
        else:
            stdout = None

        # first build a docker image for the build environment
        subprocess.check_call([
            "docker", "build",
            "--tag", "dh-venv-builder:" + tag,
            "--build-arg", "distro=" + dist,
            "-f", "docker/Dockerfile-dhvirtualenv",
            "docker",
        ], stdout=stdout, stderr=subprocess.STDOUT)
        subprocess.check_call(
            [
                "docker",
                "build",
                "--tag",
                "dh-venv-builder:" + tag,
                "--build-arg",
                "distro=" + dist,
                "-f",
                "docker/Dockerfile-dhvirtualenv",
                "docker",
            ],
            stdout=stdout,
            stderr=subprocess.STDOUT,
        )

        container_name = "synapse_build_" + tag
        with self._lock:
            self.active_containers.add(container_name)

        # then run the build itself
        subprocess.check_call([
            "docker", "run",
            "--rm",
            "--name", container_name,
            "--volume=" + projdir + ":/synapse/source:ro",
            "--volume=" + debsdir + ":/debs",
            "-e", "TARGET_USERID=%i" % (os.getuid(), ),
            "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
            "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
            "dh-venv-builder:" + tag,
        ], stdout=stdout, stderr=subprocess.STDOUT)
        subprocess.check_call(
            [
                "docker",
                "run",
                "--rm",
                "--name",
                container_name,
                "--volume=" + projdir + ":/synapse/source:ro",
                "--volume=" + debsdir + ":/debs",
                "-e",
                "TARGET_USERID=%i" % (os.getuid(),),
                "-e",
                "TARGET_GROUPID=%i" % (os.getgid(),),
                "-e",
                "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
                "dh-venv-builder:" + tag,
            ],
            stdout=stdout,
            stderr=subprocess.STDOUT,
        )

        with self._lock:
            self.active_containers.remove(container_name)

        if stdout is not None:
            stdout.close()
        print("Completed build of %s" % (dist, ))
        print("Completed build of %s" % (dist,))

    def kill_containers(self):
        with self._lock:
@@ -116,9 +134,14 @@ class Builder(object):

        for c in active:
            print("killing container %s" % (c,))
            subprocess.run([
                "docker", "kill", c,
            ], stdout=subprocess.DEVNULL)
            subprocess.run(
                [
                    "docker",
                    "kill",
                    c,
                ],
                stdout=subprocess.DEVNULL,
            )
            with self._lock:
                self.active_containers.remove(c)

@@ -129,31 +152,38 @@ def run_builds(dists, jobs=1, skip_tests=False):
    def sig(signum, _frame):
        print("Caught SIGINT")
        builder.kill_containers()

    signal.signal(signal.SIGINT, sig)

    with ThreadPoolExecutor(max_workers=jobs) as e:
        res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)

        # make sure we consume the iterable so that exceptions are raised.
        for r in res:
        for _ in res:
            pass


if __name__ == '__main__':
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=DESC,
    )
    parser.add_argument(
        '-j', '--jobs', type=int, default=1,
        help='specify the number of builds to run in parallel',
        "-j",
        "--jobs",
        type=int,
        default=1,
        help="specify the number of builds to run in parallel",
    )
    parser.add_argument(
        '--no-check', action='store_true',
        help='skip running tests after building',
        "--no-check",
        action="store_true",
        help="skip running tests after building",
    )
    parser.add_argument(
        'dist', nargs='*', default=DISTS,
        help='a list of distributions to build for. Default: %(default)s',
        "dist",
        nargs="*",
        default=DISTS,
        help="a list of distributions to build for. Default: %(default)s",
    )
    args = parser.parse_args()
    run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)

@@ -10,6 +10,9 @@
# checkout by setting the COMPLEMENT_DIR environment variable to the
# filepath of a local Complement checkout.
#
# By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable.
#
# A regular expression of test method names can be supplied as the first
# argument to the script. Complement will then only run those tests. If
# no regex is supplied, all tests are run. For example;
@@ -32,10 +35,26 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
    echo "Checkout available at 'complement-master'"
fi

# If we're using workers, modify the docker files slightly.
if [[ -n "$WORKERS" ]]; then
    BASE_IMAGE=matrixdotorg/synapse-workers
    BASE_DOCKERFILE=docker/Dockerfile-workers
    export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
    COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
    # And provide some more configuration to complement.
    export COMPLEMENT_CA=true
    export COMPLEMENT_VERSION_CHECK_ITERATIONS=500
else
    BASE_IMAGE=matrixdotorg/synapse
    BASE_DOCKERFILE=docker/Dockerfile
    export COMPLEMENT_BASE_IMAGE=complement-synapse
    COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
fi

# Build the base Synapse image from the local checkout
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
docker build -t $BASE_IMAGE -f "$BASE_DOCKERFILE" .
# Build the Synapse monolith image from Complement, based on the above image we just built
docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"
docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"

cd "$COMPLEMENT_DIR"

@@ -46,4 +65,4 @@ if [[ -n "$1" ]]; then
fi

# Run the tests!
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests

@@ -1,4 +1,3 @@
import hashlib
import json
import sys
import time
@@ -54,15 +53,9 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate):
        "server_name": server_name,
        "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
        "valid_until_ts": valid_until,
        "tls_fingerprints": [fingerprint(certificate)],
    }


def fingerprint(certificate):
    finger = hashlib.sha256(certificate)
    return {"sha256": encode_base64(finger.digest())}


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)

@@ -140,7 +140,7 @@ if __name__ == "__main__":

    definitions = {}
    for directory in args.directories:
        for root, dirs, files in os.walk(directory):
        for root, _, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(root, filename)

@@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python

import sys


@@ -80,8 +80,22 @@ else
    # then lint everything!
    if [[ -z ${files+x} ]]; then
        # Lint all source code files and directories
        # Note: this list aims the mirror the one in tox.ini
        files=("synapse" "docker" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite")
        # Note: this list aims to mirror the one in tox.ini
        files=(
            "synapse" "docker" "tests"
            # annoyingly, black doesn't find these so we have to list them
            "scripts/export_signing_key"
            "scripts/generate_config"
            "scripts/generate_log_config"
            "scripts/hash_password"
            "scripts/register_new_matrix_user"
            "scripts/synapse_port_db"
            "scripts-dev"
            "scripts-dev/build_debian_packages"
            "scripts-dev/sign_json"
            "scripts-dev/update_database"
            "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite"
        )
    fi
fi

@@ -48,7 +48,7 @@ args = parser.parse_args()


for directory in args.directories:
    for root, dirs, files in os.walk(directory):
    for root, _, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".py"):
                filepath = os.path.join(root, filename)

@@ -6,7 +6,7 @@
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.

POSTGRES_HOST="localhost"
export PGHOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"

SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"
@@ -32,7 +32,7 @@ usage() {
while getopts "p:co:h" opt; do
  case $opt in
    p)
      POSTGRES_USERNAME=$OPTARG
      export PGUSER=$OPTARG
      ;;
    c)
      # Print all commands that are being executed
@@ -69,7 +69,7 @@ if [ ${#unsatisfied_requirements} -ne 0 ]; then
  exit 1
fi

if [ -z "$POSTGRES_USERNAME" ]; then
if [ -z "$PGUSER" ]; then
  echo "No postgres username supplied"
  usage
  exit 1
@@ -84,8 +84,9 @@ fi
# Create the output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"

read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD
read -rsp "Postgres password for '$PGUSER': " PGPASSWORD
echo ""
export PGPASSWORD

# Exit immediately if a command fails
set -e
@@ -131,9 +132,9 @@ report_stats: false
database:
  name: "psycopg2"
  args:
    user: "$POSTGRES_USERNAME"
    host: "$POSTGRES_HOST"
    password: "$POSTGRES_PASSWORD"
    user: "$PGUSER"
    host: "$PGHOST"
    password: "$PGPASSWORD"
    database: "$POSTGRES_DB_NAME"

# Suppress the key server warning.
@@ -150,7 +151,7 @@ scripts-dev/update_database --database-config "$SQLITE_CONFIG"

# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb $POSTGRES_DB_NAME
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"

echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
if [ -z "$COVERAGE" ]; then
@@ -181,7 +182,7 @@ DROP TABLE user_directory_search_docsize;
DROP TABLE user_directory_search_stat;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"

echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"

@@ -30,7 +30,11 @@ def exit(status: int = 0, message: Optional[str] = None):
def format_plain(public_key: nacl.signing.VerifyKey):
    print(
        "%s:%s %s"
        % (public_key.alg, public_key.version, encode_verify_key_base64(public_key),)
        % (
            public_key.alg,
            public_key.version,
            encode_verify_key_base64(public_key),
        )
    )


@@ -50,7 +54,10 @@ if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read",
        "key_file",
        nargs="+",
        type=argparse.FileType("r"),
        help="The key file to read",
    )

    parser.add_argument(
@@ -63,7 +70,7 @@ if __name__ == "__main__":
    parser.add_argument(
        "--expiry-ts",
        type=int,
        default=int(time.time() * 1000) + 6*3600000,
        default=int(time.time() * 1000) + 6 * 3600000,
        help=(
            "The expiry time to use for -x, in milliseconds since 1970. The default "
            "is (now+6h)."

@@ -11,23 +11,22 @@ if __name__ == "__main__":
    parser.add_argument(
        "--config-dir",
        default="CONFDIR",

        help="The path where the config files are kept. Used to create filenames for "
        "things like the log config and the signing key. Default: %(default)s",
        "things like the log config and the signing key. Default: %(default)s",
    )

    parser.add_argument(
        "--data-dir",
        default="DATADIR",
        help="The path where the data files are kept. Used to create filenames for "
        "things like the database and media store. Default: %(default)s",
        "things like the database and media store. Default: %(default)s",
    )

    parser.add_argument(
        "--server-name",
        default="SERVERNAME",
        help="The server name. Used to initialise the server_name config param, but also "
        "used in the names of some of the config files. Default: %(default)s",
        "used in the names of some of the config files. Default: %(default)s",
    )

    parser.add_argument(
@@ -41,21 +40,22 @@ if __name__ == "__main__":
        "--generate-secrets",
        action="store_true",
        help="Enable generation of new secrets for things like the macaroon_secret_key."
        "By default, these parameters will be left unset."
        "By default, these parameters will be left unset.",
    )

    parser.add_argument(
        "-o", "--output-file",
        type=argparse.FileType('w'),
        "-o",
        "--output-file",
        type=argparse.FileType("w"),
        default=sys.stdout,
        help="File to write the configuration to. Default: stdout",
    )

    parser.add_argument(
        "--header-file",
        type=argparse.FileType('r'),
        type=argparse.FileType("r"),
        help="File from which to read a header, which will be printed before the "
        "generated config.",
        "generated config.",
    )

    args = parser.parse_args()

@@ -41,7 +41,7 @@ if __name__ == "__main__":
    parser.add_argument(
        "-c",
        "--config",
        type=argparse.FileType('r'),
        type=argparse.FileType("r"),
        help=(
            "Path to server config file. "
            "Used to read in bcrypt_rounds and password_pepper."
@@ -72,8 +72,8 @@ if __name__ == "__main__":
    pw = unicodedata.normalize("NFKC", password)

    hashed = bcrypt.hashpw(
        pw.encode('utf8') + password_pepper.encode("utf8"),
        pw.encode("utf8") + password_pepper.encode("utf8"),
        bcrypt.gensalt(bcrypt_rounds),
    ).decode('ascii')
    ).decode("ascii")

    print(hashed)

@@ -294,8 +294,7 @@ class Porter(object):
        return table, already_ported, total_to_port, forward_chunk, backward_chunk

    async def get_table_constraints(self) -> Dict[str, Set[str]]:
        """Returns a map of tables that have foreign key constraints to tables they depend on.
        """
        """Returns a map of tables that have foreign key constraints to tables they depend on."""

        def _get_constraints(txn):
            # We can pull the information about foreign key constraints out from
@@ -504,7 +503,9 @@ class Porter(object):
            return

    def build_db_store(
        self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False,
        self,
        db_config: DatabaseConnectionConfig,
        allow_outdated_version: bool = False,
    ):
        """Builds and returns a database store using the provided configuration.

@@ -634,8 +635,11 @@ class Porter(object):
            "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
        )
        await self._setup_sequence(
            "account_data_sequence", ("room_account_data", "room_tags_revisions", "account_data"))
        await self._setup_sequence("receipts_sequence", ("receipts_linearized", ))
            "account_data_sequence",
            ("room_account_data", "room_tags_revisions", "account_data"),
        )
        await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
        await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
        await self._setup_auth_chain_sequence()

        # Step 3. Get tables.
@@ -737,7 +741,7 @@ class Porter(object):
            return col

        outrows = []
        for i, row in enumerate(rows):
        for row in rows:
            try:
                outrows.append(
                    tuple(conv(j, col) for j, col in enumerate(row) if j > 0)
@@ -887,8 +891,7 @@ class Porter(object):
        await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)

    async def _setup_events_stream_seqs(self) -> None:
        """Set the event stream sequences to the correct values.
        """
        """Set the event stream sequences to the correct values."""

        # We get called before we've ported the events table, so we need to
        # fetch the current positions from the SQLite store.
@@ -910,18 +913,21 @@ class Porter(object):
                (curr_forward_id + 1,),
            )

            txn.execute(
                "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s",
                (curr_backward_id + 1,),
            )
            if curr_backward_id:
                txn.execute(
                    "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s",
                    (curr_backward_id + 1,),
                )

        await self.postgres_store.db_pool.runInteraction(
            "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
            "_setup_events_stream_seqs",
            _setup_events_stream_seqs_set_pos,
        )

    async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None:
        """Set a sequence to the correct value.
        """
    async def _setup_sequence(
        self, sequence_name: str, stream_id_tables: Iterable[str]
    ) -> None:
        """Set a sequence to the correct value."""
        current_stream_ids = []
        for stream_id_table in stream_id_tables:
            max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
@@ -935,26 +941,32 @@ class Porter(object):
        next_id = max(current_stream_ids) + 1

        def r(txn):
            sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, )
            txn.execute(sql + " %s", (next_id, ))
            sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,)
            txn.execute(sql + " %s", (next_id,))

        await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r)
        await self.postgres_store.db_pool.runInteraction(
            "_setup_%s" % (sequence_name,), r
        )

    async def _setup_auth_chain_sequence(self) -> None:
        curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True
            table="event_auth_chains",
            keyvalues={},
            retcol="MAX(chain_id)",
            allow_none=True,
        )

        def r(txn):
            txn.execute(
                "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
                (curr_chain_id,),
                (curr_chain_id + 1,),
            )

        await self.postgres_store.db_pool.runInteraction(
            "_setup_event_auth_chain_id", r,
        )

        if curr_chain_id is not None:
            await self.postgres_store.db_pool.runInteraction(
                "_setup_event_auth_chain_id",
                r,
            )


##############################################
@@ -963,8 +975,7 @@ class Porter(object):


class Progress(object):
    """Used to report progress of the port
    """
    """Used to report progress of the port"""

    def __init__(self):
        self.tables = {}
@@ -989,8 +1000,7 @@ class Progress(object):


class CursesProgress(Progress):
    """Reports progress to a curses window
    """
    """Reports progress to a curses window"""

    def __init__(self, stdscr):
        self.stdscr = stdscr
@@ -1015,7 +1025,7 @@ class CursesProgress(Progress):

        self.total_processed = 0
        self.total_remaining = 0
        for table, data in self.tables.items():
        for data in self.tables.values():
            self.total_processed += data["num_done"] - data["start"]
            self.total_remaining += data["total"] - data["num_done"]

@@ -1106,8 +1116,7 @@ class CursesProgress(Progress):


class TerminalProgress(Progress):
    """Just prints progress to the terminal
    """
    """Just prints progress to the terminal"""

    def update(self, table, num_done):
        super(TerminalProgress, self).update(table, num_done)

@@ -18,8 +18,7 @@ ignore =
    # E203: whitespace before ':' (which is contrary to pep8?)
    # E731: do not assign a lambda expression, use a def
    # E501: Line too long (black enforces this for us)
    # B007: Subsection of the bugbear suite (TODO: add in remaining fixes)
ignore=W503,W504,E203,E731,E501,B007
ignore=W503,W504,E203,E731,E501

[isort]
line_length = 88

@@ -21,8 +21,8 @@ import os
import sys

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 5):
    print("Synapse requires Python 3.5 or above.")
if sys.version_info < (3, 6):
    print("Synapse requires Python 3.6 or above.")
    sys.exit(1)

# Twisted and canonicaljson will fail to import when this file is executed to
@@ -47,7 +47,7 @@ try:
except ImportError:
    pass

__version__ = "1.32.0rc1"
__version__ = "1.35.0rc2"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -12,14 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional, Tuple
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import pymacaroons
from netaddr import IPAddress

from twisted.web.server import Request

import synapse.types
from synapse import event_auth
from synapse.api.auth_blocking import AuthBlocking
from synapse.api.constants import EventTypes, HistoryVisibility, Membership
@@ -36,11 +35,14 @@ from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging import opentracing as opentracing
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import StateMap, UserID
from synapse.types import Requester, StateMap, UserID, create_requester
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -65,9 +67,10 @@ class Auth:
    """
    FIXME: This class contains a mix of functions for authenticating users
    of our client-server API and authenticating events added to room graphs.
    The latter should be moved to synapse.handlers.event_auth.EventAuthHandler.
    """

    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
@@ -79,19 +82,22 @@ class Auth:

        self._auth_blocking = AuthBlocking(self.hs)

        self._account_validity = hs.config.account_validity
        self._account_validity_enabled = (
            hs.config.account_validity.account_validity_enabled
        )
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

    async def check_from_context(
        self, room_version: str, event, context, do_sig_check=True
    ):
    ) -> None:
        prev_state_ids = await context.get_prev_state_ids()
        auth_events_ids = self.compute_auth_events(
            event, prev_state_ids, for_verification=True
        )
        auth_events = await self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
        auth_events_by_id = await self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}

        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        event_auth.check(
@@ -148,17 +154,11 @@ class Auth:

        raise AuthError(403, "User %s not in room %s" % (user_id, room_id))

    async def check_host_in_room(self, room_id, host):
    async def check_host_in_room(self, room_id: str, host: str) -> bool:
        with Measure(self.clock, "check_host_in_room"):
            latest_event_ids = await self.store.is_host_joined(room_id, host)
            return latest_event_ids
            return await self.store.is_host_joined(room_id, host)

    def can_federate(self, event, auth_events):
        creation_event = auth_events.get((EventTypes.Create, ""))

        return creation_event.content.get("m.federate", True) is True

    def get_public_keys(self, invite_event):
    def get_public_keys(self, invite_event: EventBase) -> List[Dict[str, Any]]:
        return event_auth.get_public_keys(invite_event)

    async def get_user_by_req(
@@ -167,7 +167,7 @@ class Auth:
        allow_guest: bool = False,
        rights: str = "access",
        allow_expired: bool = False,
    ) -> synapse.types.Requester:
    ) -> Requester:
        """Get a registered user's ID.

        Args:
@@ -193,7 +193,7 @@ class Auth:
            access_token = self.get_access_token_from_request(request)

            user_id, app_service = await self._get_appservice_user_id(request)
            if user_id:
            if user_id and app_service:
                if ip_addr and self._track_appservice_user_ips:
                    await self.store.insert_client_ip(
                        user_id=user_id,
@@ -203,14 +203,14 @@ class Auth:
                        device_id="dummy-device",  # stubbed
                    )

                requester = synapse.types.create_requester(
                    user_id, app_service=app_service
                )
                requester = create_requester(user_id, app_service=app_service)

                request.requester = user_id
                opentracing.set_tag("authenticated_entity", user_id)
                opentracing.set_tag("user_id", user_id)
                opentracing.set_tag("appservice_id", app_service.id)
                if user_id in self._force_tracing_for_users:
                    opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)

                return requester

@@ -222,7 +222,7 @@ class Auth:
            shadow_banned = user_info.shadow_banned

            # Deny the request if the user account has expired.
            if self._account_validity.enabled and not allow_expired:
            if self._account_validity_enabled and not allow_expired:
                if await self.store.is_account_expired(
                    user_info.user_id, self.clock.time_msec()
                ):
@@ -248,7 +248,7 @@ class Auth:
                    errcode=Codes.GUEST_ACCESS_FORBIDDEN,
                )

            requester = synapse.types.create_requester(
            requester = create_requester(
                user_info.user_id,
                token_id,
                is_guest,
@@ -263,12 +263,16 @@ class Auth:
            opentracing.set_tag("user_id", user_info.user_id)
            if device_id:
                opentracing.set_tag("device_id", device_id)
            if user_info.token_owner in self._force_tracing_for_users:
                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)

            return requester
        except KeyError:
            raise MissingClientTokenError()

    async def _get_appservice_user_id(self, request):
    async def _get_appservice_user_id(
        self, request: Request
    ) -> Tuple[Optional[str], Optional[ApplicationService]]:
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(request)
        )
@@ -280,6 +284,9 @@ class Auth:
            if ip_address not in app_service.ip_range_whitelist:
                return None, None

        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        if b"user_id" not in request.args:
            return app_service.sender, app_service

@@ -384,7 +391,9 @@ class Auth:
            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
            raise InvalidClientTokenError("Invalid macaroon passed.")

    def _parse_and_validate_macaroon(self, token, rights="access"):
    def _parse_and_validate_macaroon(
        self, token: str, rights: str = "access"
    ) -> Tuple[str, bool]:
        """Takes a macaroon and tries to parse and validate it. This is cached
        if and only if rights == access and there isn't an expiry.

@@ -429,15 +438,16 @@ class Auth:

        return user_id, guest

    def validate_macaroon(self, macaroon, type_string, user_id):
    def validate_macaroon(
        self, macaroon: pymacaroons.Macaroon, type_string: str, user_id: str
    ) -> None:
        """
        validate that a Macaroon is understood by and was signed by this server.

        Args:
            macaroon(pymacaroons.Macaroon): The macaroon to validate
            type_string(str): The kind of token required (e.g. "access",
                "delete_pusher")
            user_id (str): The user_id required
            macaroon: The macaroon to validate
            type_string: The kind of token required (e.g. "access", "delete_pusher")
            user_id: The user_id required
        """
        v = pymacaroons.Verifier()

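For context on what validate_macaroon enforces: a Synapse access token is a macaroon whose first-party caveats (generation, token type, user ID) must all be satisfied and whose signature chain must match the server's macaroon secret key. A minimal sketch with the pymacaroons library — the caveat strings and secret below are illustrative, not Synapse's exact values:

    import pymacaroons

    secret_key = "macaroon-secret-key"  # stands in for the homeserver's macaroon_secret_key

    # Mint a token carrying first-party caveats, roughly as a homeserver would.
    m = pymacaroons.Macaroon(location="example.com", identifier="key", key=secret_key)
    m.add_first_party_caveat("gen = 1")
    m.add_first_party_caveat("type = access")
    m.add_first_party_caveat("user_id = @alice:example.com")

    # Verification succeeds only if every caveat is satisfied and the signature
    # chain matches the secret key; otherwise pymacaroons raises an exception.
    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    v.satisfy_exact("type = access")
    v.satisfy_exact("user_id = @alice:example.com")
    assert v.verify(m, secret_key)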
@@ -462,9 +472,7 @@ class Auth:
        if not service:
            logger.warning("Unrecognised appservice access token.")
            raise InvalidClientTokenError()
        request.requester = synapse.types.create_requester(
            service.sender, app_service=service
        )
        request.requester = create_requester(service.sender, app_service=service)
        return service

    async def is_server_admin(self, user: UserID) -> bool:
@@ -516,7 +524,7 @@ class Auth:

        return auth_ids

    async def check_can_change_room_list(self, room_id: str, user: UserID):
    async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool:
        """Determine whether the user is allowed to edit the room's entry in the
        published room list.

@@ -551,11 +559,11 @@ class Auth:
        return user_level >= send_level

    @staticmethod
    def has_access_token(request: Request):
    def has_access_token(request: Request) -> bool:
        """Checks if the request has an access_token.

        Returns:
            bool: False if no access_token was given, True otherwise.
            False if no access_token was given, True otherwise.
        """
        # This will always be set by the time Twisted calls us.
        assert request.args is not None
@@ -565,13 +573,13 @@ class Auth:
        return bool(query_params) or bool(auth_headers)

    @staticmethod
    def get_access_token_from_request(request: Request):
    def get_access_token_from_request(request: Request) -> str:
        """Extracts the access_token from the request.

        Args:
            request: The http request.
        Returns:
            unicode: The access_token
            The access_token
        Raises:
            MissingClientTokenError: If there isn't a single access_token in the
                request
@@ -646,5 +654,5 @@ class Auth:
            % (user_id, room_id),
        )

    def check_auth_blocking(self, *args, **kwargs):
        return self._auth_blocking.check_auth_blocking(*args, **kwargs)
    async def check_auth_blocking(self, *args, **kwargs) -> None:
        await self._auth_blocking.check_auth_blocking(*args, **kwargs)

synapse/api/auth_blocking.py
@@ -13,18 +13,21 @@
# limitations under the License.

import logging
from typing import Optional
from typing import TYPE_CHECKING, Optional

from synapse.api.constants import LimitBlockingTypes, UserTypes
from synapse.api.errors import Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
from synapse.types import Requester

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class AuthBlocking:
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()

        self._server_notices_mxid = hs.config.server_notices_mxid
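The TYPE_CHECKING import plus the quoted annotation "HomeServer" above is the standard way to type against a class whose module would otherwise import back into this one at runtime. A minimal sketch of the pattern (module names are made up for illustration):

    # a.py -- hypothetical module that needs B only for type checking
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Imported only while a type checker (e.g. mypy) analyses the file;
        # at runtime this branch is skipped, so no circular import occurs.
        from b import B

    class A:
        def __init__(self, b: "B"):  # quoted: B is undefined at runtime
            self.b = b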
@@ -43,7 +46,7 @@ class AuthBlocking:
        threepid: Optional[dict] = None,
        user_type: Optional[str] = None,
        requester: Optional[Requester] = None,
    ):
    ) -> None:
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag


synapse/api/constants.py
@@ -17,6 +17,9 @@

"""Contains constants from the specification."""

# the max size of a (canonical-json-encoded) event
MAX_PDU_SIZE = 65536

# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2 ** 63 - 1

@@ -107,13 +110,18 @@ class EventTypes:

    Dummy = "org.matrix.dummy_event"

    SpaceChild = "m.space.child"
    SpaceParent = "m.space.parent"
    MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child"
    MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent"


class ToDeviceEventTypes:
    RoomKeyRequest = "m.room_key_request"


class EduTypes:
    Presence = "m.presence"
    RoomKeyRequest = "m.room_key_request"


class RejectedReason:
@@ -171,6 +179,7 @@ class EventContentFields:
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"

    # cf https://github.com/matrix-org/matrix-doc/pull/1772
    ROOM_TYPE = "type"
    MSC1772_ROOM_TYPE = "org.matrix.msc1772.type"


synapse/api/ratelimiting.py
@@ -57,6 +57,7 @@ class Ratelimiter:
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
        n_actions: int = 1,
        _time_now_s: Optional[int] = None,
    ) -> Tuple[bool, float]:
        """Can the entity (e.g. user or IP address) perform the action?
@@ -76,6 +77,9 @@ class Ratelimiter:
            burst_count: How many actions that can be performed before being limited.
                Overrides the value set during instantiation if set.
            update: Whether to count this check as performing the action
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.
            _time_now_s: The current time. Optional, defaults to the current time according
                to self.clock. Only used by tests.

@@ -124,17 +128,20 @@ class Ratelimiter:
        time_delta = time_now_s - time_start
        performed_count = action_count - time_delta * rate_hz
        if performed_count < 0:
            # Allow, reset back to count 1
            allowed = True
            performed_count = 0
            time_start = time_now_s
            action_count = 1.0
        elif performed_count > burst_count - 1.0:

        # This check would be easier read as performed_count + n_actions > burst_count,
        # but performed_count might be a very precise float (with lots of numbers
        # following the point) in which case Python might round it up when adding it to
        # n_actions. Writing it this way ensures it doesn't happen.
        if performed_count > burst_count - n_actions:
            # Deny, we have exceeded our burst count
            allowed = False
        else:
            # We haven't reached our limit yet
            allowed = True
            action_count += 1.0
            action_count = performed_count + n_actions

        if update:
            self.actions[key] = (action_count, time_start, rate_hz)
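The float comment in this hunk rewards a concrete check. performed_count carries rounding error from the action_count - time_delta * rate_hz arithmetic, and adding n_actions to it can round the sum so the comparison silently flips; subtracting the exact integer n_actions from burst_count instead keeps the right-hand side exact. A toy illustration, with numbers contrived to force a rounding tie (not taken from the Synapse test suite):

    # performed_count carries floating-point error from the rate arithmetic.
    performed_count = 2.0000000000000004  # one ULP above 2.0
    burst_count = 4
    n_actions = 2

    # Naive form: the addition rounds 4.0000000000000004 back to exactly 4.0,
    # so the overshoot goes unnoticed and the action would be allowed.
    print(performed_count + n_actions > burst_count)   # False

    # Rewritten form: burst_count - n_actions is exact (both are ints),
    # so the overshoot is detected and the action is denied.
    print(performed_count > burst_count - n_actions)   # True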
@@ -182,6 +189,7 @@ class Ratelimiter:
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
        n_actions: int = 1,
        _time_now_s: Optional[int] = None,
    ):
        """Checks if an action can be performed. If not, raises a LimitExceededError
@@ -201,6 +209,9 @@ class Ratelimiter:
            burst_count: How many actions that can be performed before being limited.
                Overrides the value set during instantiation if set.
            update: Whether to count this check as performing the action
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.
            _time_now_s: The current time. Optional, defaults to the current time according
                to self.clock. Only used by tests.

@@ -216,6 +227,7 @@ class Ratelimiter:
            rate_hz=rate_hz,
            burst_count=burst_count,
            update=update,
            n_actions=n_actions,
            _time_now_s=time_now_s,
        )


synapse/app/_base.py
@@ -30,12 +30,14 @@ from twisted.internet import defer, error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config.server import ListenerConfig
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.async_helpers import Linearizer
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
@@ -114,6 +116,7 @@ def start_reactor(

    def run():
        logger.info("Running")
        setup_jemalloc_stats()
        change_resource_limit(soft_file_limit)
        if gc_thresholds:
            gc.set_threshold(*gc_thresholds)
@@ -288,7 +291,7 @@ def refresh_certificate(hs):
    logger.info("Context factories updated.")


async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):
async def start(hs: "synapse.server.HomeServer"):
    """
    Start a Synapse server or worker.

@@ -300,7 +303,6 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon

    Args:
        hs: homeserver instance
        listeners: Listener configuration ('listeners' in homeserver.yaml)
    """
    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):
@@ -336,7 +338,7 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon
    synapse.logging.opentracing.init_tracer(hs)  # type: ignore[attr-defined] # noqa

    # It is now safe to start your Synapse.
    hs.start_listening(listeners)
    hs.start_listening()
    hs.get_datastore().db_pool.start_profiling()
    hs.get_pusherpool().start()

@@ -530,3 +532,25 @@ def sdnotify(state):
        # this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
        # unless systemd is expecting us to notify it.
        logger.warning("Unable to send notification to systemd: %s", e)


def max_request_body_size(config: HomeServerConfig) -> int:
    """Get a suitable maximum size for incoming HTTP requests"""

    # Other than media uploads, the biggest request we expect to see is a fully-loaded
    # /federation/v1/send request.
    #
    # The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
    # limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
    # json encoding); there is no specced limit to EDUs (see
    # https://github.com/matrix-org/matrix-doc/issues/3121).
    #
    # in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
    #
    max_request_size = 200 * MAX_PDU_SIZE

    # if we have a media repo enabled, we may need to allow larger uploads than that
    if config.media.can_load_media_repo:
        max_request_size = max(max_request_size, config.media.max_upload_size)

    return max_request_size

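A quick sanity check of the arithmetic in max_request_body_size (plain Python, not part of the diff; the 50 MiB upload limit is an assumed example value):

    MAX_PDU_SIZE = 65536  # 64 KiB, from synapse/api/constants.py above

    default_cap = 200 * MAX_PDU_SIZE
    print(default_cap)                  # 13107200 bytes
    print(default_cap / (1024 * 1024))  # 12.5 MiB, the "about 12.5M" in the comment

    # With a media repo enabled, the cap is raised to the configured upload
    # limit if that is larger:
    max_upload_size = 50 * 1024 * 1024
    print(max(default_cap, max_upload_size))  # 52428800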
synapse/app/admin_cmd.py
@@ -70,12 +70,6 @@ class AdminCmdSlavedStore(
class AdminCmdServer(HomeServer):
    DATASTORE_CLASS = AdminCmdSlavedStore

    def _listen_http(self, listener_config):
        pass

    def start_listening(self, listeners):
        pass


async def export_data_command(hs, args):
    """Export data for a user.
@@ -232,7 +226,7 @@ def start(config_options):

    async def run():
        with LoggingContext("command"):
            _base.start(ss, [])
            _base.start(ss)
            await args.func(ss, args)

    _base.start_worker_reactor(

synapse/app/generic_worker.py
@@ -15,7 +15,7 @@
# limitations under the License.
import logging
import sys
from typing import Dict, Iterable, Optional
from typing import Dict, Optional

from twisted.internet import address
from twisted.web.resource import IResource
@@ -32,7 +32,7 @@ from synapse.api.urls import (
    SERVER_KEY_V2_PREFIX,
)
from synapse.app import _base
from synapse.app._base import register_start
from synapse.app._base import max_request_body_size, register_start
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
@@ -55,16 +55,14 @@ from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, room
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.profile import (
    ProfileAvatarURLRestServlet,
@@ -110,6 +108,7 @@ from synapse.storage.databases.main.metrics import ServerMetricsStore
from synapse.storage.databases.main.monthly_active_users import (
    MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.search import SearchWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
@@ -121,26 +120,6 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.generic_worker")


class PresenceStatusStubServlet(RestServlet):
    """If presence is disabled this servlet can be used to stub out setting
    presence status.
    """

    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super().__init__()
        self.auth = hs.get_auth()

    async def on_GET(self, request, user_id):
        await self.auth.get_user_by_req(request)
        return 200, {"presence": "offline"}

    async def on_PUT(self, request, user_id):
        await self.auth.get_user_by_req(request)
        return 200, {}


class KeyUploadServlet(RestServlet):
    """An implementation of the `KeyUploadServlet` that responds to read only
    requests, but otherwise proxies through to the master instance.
@@ -241,6 +220,7 @@ class GenericWorkerSlavedStore(
    StatsStore,
    UIAuthWorkerStore,
    EndToEndRoomKeyStore,
    PresenceStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    SlavedReceiptsStore,
@@ -256,10 +236,8 @@ class GenericWorkerSlavedStore(
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    SlavedPresenceStore,
    SlavedFilteringStore,
    MonthlyActiveUsersWorkerStore,
    MediaRepositoryStore,
@@ -327,10 +305,7 @@ class GenericWorkerServer(HomeServer):

                    user_directory.register_servlets(self, resource)

                    # If presence is disabled, use the stub servlet that does
                    # not allow sending presence
                    if not self.config.use_presence:
                        PresenceStatusStubServlet(self).register(resource)
                    presence.register_servlets(self, resource)

                    groups.register_servlets(self, resource)

@@ -390,14 +365,16 @@ class GenericWorkerServer(HomeServer):
                listener_config,
                root_resource,
                self.version_string,
                max_request_body_size=max_request_body_size(self.config),
                reactor=self.get_reactor(),
            ),
            reactor=self.get_reactor(),
        )

        logger.info("Synapse worker now listening on port %d", port)

    def start_listening(self, listeners: Iterable[ListenerConfig]):
        for listener in listeners:
    def start_listening(self):
        for listener in self.config.worker_listeners:
            if listener.type == "http":
                self._listen_http(listener)
            elif listener.type == "manhole":
@@ -475,6 +452,10 @@ def start(config_options):
        config.server.update_user_directory = False

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = GenericWorkerServer(
        config.server_name,
@@ -490,7 +471,7 @@ def start(config_options):
    # streams. Will no-op if no streams can be written to by this worker.
    hs.get_replication_streamer()

    register_start(_base.start, hs, config.worker_listeners)
    register_start(_base.start, hs)

    _base.start_worker_reactor("synapse-generic-worker", config)

synapse/app/homeserver.py
@@ -17,7 +17,7 @@
import logging
import os
import sys
from typing import Iterable, Iterator
from typing import Iterator

from twisted.internet import reactor
from twisted.web.resource import EncodingResourceWrapper, IResource
@@ -36,7 +36,13 @@ from synapse.api.urls import (
    WEB_CLIENT_PREFIX,
)
from synapse.app import _base
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error, register_start
from synapse.app._base import (
    listen_ssl,
    listen_tcp,
    max_request_body_size,
    quit_with_error,
    register_start,
)
from synapse.config._base import ConfigError
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.homeserver import HomeServerConfig
@@ -126,19 +132,21 @@ class SynapseHomeServer(HomeServer):
        else:
            root_resource = OptionsResource()

        root_resource = create_resource_tree(resources, root_resource)
        site = SynapseSite(
            "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
            site_tag,
            listener_config,
            create_resource_tree(resources, root_resource),
            self.version_string,
            max_request_body_size=max_request_body_size(self.config),
            reactor=self.get_reactor(),
        )

        if tls:
            ports = listen_ssl(
                bind_addresses,
                port,
                SynapseSite(
                    "synapse.access.https.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                ),
                site,
                self.tls_server_context_factory,
                reactor=self.get_reactor(),
            )
@@ -148,13 +156,7 @@ class SynapseHomeServer(HomeServer):
            ports = listen_tcp(
                bind_addresses,
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                ),
                site,
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d", port)
@@ -273,14 +275,14 @@ class SynapseHomeServer(HomeServer):

        return resources

    def start_listening(self, listeners: Iterable[ListenerConfig]):
    def start_listening(self):
        if self.config.redis_enabled:
            # If redis is enabled we connect via the replication command handler
            # in the same way as the workers (since we're effectively a client
            # rather than a server).
            self.get_tcp_replication().start_replication(self)

        for listener in listeners:
        for listener in self.config.server.listeners:
            if listener.type == "http":
                self._listening_services.extend(
                    self._listener_http(self.config, listener)
@@ -339,6 +341,10 @@ def setup(config_options):
        sys.exit(0)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = SynapseHomeServer(
        config.server_name,
@@ -412,7 +418,7 @@ def setup(config_options):
        # Loading the provider metadata also ensures the provider config is valid.
        await oidc.load_metadata()

        await _base.start(hs, config.listeners)
        await _base.start(hs)

    hs.get_datastore().db_pool.updates.start_doing_background_updates()


synapse/config/_base.pyi
@@ -1,21 +1,22 @@
from typing import Any, Iterable, List, Optional

from synapse.config import (
    account_validity,
    api,
    appservice,
    auth,
    captcha,
    cas,
    consent_config,
    consent,
    database,
    emailconfig,
    experimental,
    groups,
    jwt_config,
    jwt,
    key,
    logger,
    metrics,
    oidc_config,
    oidc,
    password_auth_providers,
    push,
    ratelimiting,
@@ -23,9 +24,9 @@ from synapse.config import (
    registration,
    repository,
    room_directory,
    saml2_config,
    saml2,
    server,
    server_notices_config,
    server_notices,
    spam_checker,
    sso,
    stats,
@@ -59,15 +60,16 @@ class RootConfig:
    captcha: captcha.CaptchaConfig
    voip: voip.VoipConfig
    registration: registration.RegistrationConfig
    account_validity: account_validity.AccountValidityConfig
    metrics: metrics.MetricsConfig
    api: api.ApiConfig
    appservice: appservice.AppServiceConfig
    key: key.KeyConfig
    saml2: saml2_config.SAML2Config
    saml2: saml2.SAML2Config
    cas: cas.CasConfig
    sso: sso.SSOConfig
    oidc: oidc_config.OIDCConfig
    jwt: jwt_config.JWTConfig
    oidc: oidc.OIDCConfig
    jwt: jwt.JWTConfig
    auth: auth.AuthConfig
    email: emailconfig.EmailConfig
    worker: workers.WorkerConfig
@@ -76,9 +78,9 @@ class RootConfig:
    spamchecker: spam_checker.SpamCheckerConfig
    groups: groups.GroupsConfig
    userdirectory: user_directory.UserDirectoryConfig
    consent: consent_config.ConsentConfig
    consent: consent.ConsentConfig
    stats: stats.StatsConfig
    servernotices: server_notices_config.ServerNoticesConfig
    servernotices: server_notices.ServerNoticesConfig
    roomdirectory: room_directory.RoomDirectoryConfig
    thirdpartyrules: third_party_event_rules.ThirdPartyRulesConfig
    tracer: tracer.TracerConfig

synapse/config/account_validity.py (new file, 165 lines)
@@ -0,0 +1,165 @@
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import Config, ConfigError


class AccountValidityConfig(Config):
    section = "account_validity"

    def read_config(self, config, **kwargs):
        account_validity_config = config.get("account_validity") or {}
        self.account_validity_enabled = account_validity_config.get("enabled", False)
        self.account_validity_renew_by_email_enabled = (
            "renew_at" in account_validity_config
        )

        if self.account_validity_enabled:
            if "period" in account_validity_config:
                self.account_validity_period = self.parse_duration(
                    account_validity_config["period"]
                )
            else:
                raise ConfigError("'period' is required when using account validity")

            if "renew_at" in account_validity_config:
                self.account_validity_renew_at = self.parse_duration(
                    account_validity_config["renew_at"]
                )

            if "renew_email_subject" in account_validity_config:
                self.account_validity_renew_email_subject = account_validity_config[
                    "renew_email_subject"
                ]
            else:
                self.account_validity_renew_email_subject = "Renew your %(app)s account"

            self.account_validity_startup_job_max_delta = (
                self.account_validity_period * 10.0 / 100.0
            )

        if self.account_validity_renew_by_email_enabled:
            if not self.public_baseurl:
                raise ConfigError("Can't send renewal emails without 'public_baseurl'")

        # Load account validity templates.
        account_validity_template_dir = account_validity_config.get("template_dir")

        account_renewed_template_filename = account_validity_config.get(
            "account_renewed_html_path", "account_renewed.html"
        )
        invalid_token_template_filename = account_validity_config.get(
            "invalid_token_html_path", "invalid_token.html"
        )

        # Read and store template content
        (
            self.account_validity_account_renewed_template,
            self.account_validity_account_previously_renewed_template,
            self.account_validity_invalid_token_template,
        ) = self.read_templates(
            [
                account_renewed_template_filename,
                "account_previously_renewed.html",
                invalid_token_template_filename,
            ],
            account_validity_template_dir,
        )

    def generate_config_section(self, **kwargs):
        return """\
        ## Account Validity ##

        # Optional account validity configuration. This allows for accounts to be denied
        # any request after a given period.
        #
        # Once this feature is enabled, Synapse will look for registered users without an
        # expiration date at startup and will add one to every account it found using the
        # current settings at that time.
        # This means that, if a validity period is set, and Synapse is restarted (it will
        # then derive an expiration date from the current validity period), and some time
        # after that the validity period changes and Synapse is restarted, the users'
        # expiration dates won't be updated unless their account is manually renewed. This
        # date will be randomly selected within a range [now + period - d ; now + period],
        # where d is equal to 10% of the validity period.
        #
        account_validity:
          # The account validity feature is disabled by default. Uncomment the
          # following line to enable it.
          #
          #enabled: true

          # The period after which an account is valid after its registration. When
          # renewing the account, its validity period will be extended by this amount
          # of time. This parameter is required when using the account validity
          # feature.
          #
          #period: 6w

          # The amount of time before an account's expiry date at which Synapse will
          # send an email to the account's email address with a renewal link. By
          # default, no such emails are sent.
          #
          # If you enable this setting, you will also need to fill out the 'email' and
          # 'public_baseurl' configuration sections.
          #
          #renew_at: 1w

          # The subject of the email sent out with the renewal link. '%(app)s' can be
          # used as a placeholder for the 'app_name' parameter from the 'email'
          # section.
          #
          # Note that the placeholder must be written '%(app)s', including the
          # trailing 's'.
          #
          # If this is not set, a default value is used.
          #
          #renew_email_subject: "Renew your %(app)s account"

          # Directory in which Synapse will try to find templates for the HTML files to
          # serve to the user when trying to renew an account. If not set, default
          # templates from within the Synapse package will be used.
          #
          # The currently available templates are:
          #
          # * account_renewed.html: Displayed to the user after they have successfully
          #       renewed their account.
          #
          # * account_previously_renewed.html: Displayed to the user if they attempt to
          #       renew their account with a token that is valid, but that has already
          #       been used. In this case the account is not renewed again.
          #
          # * invalid_token.html: Displayed to the user when they try to renew an account
          #       with an unknown or invalid renewal token.
          #
          # See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for
          # default template contents.
          #
          # The file name of some of these templates can be configured below for legacy
          # reasons.
          #
          #template_dir: "res/templates"

          # A custom file name for the 'account_renewed.html' template.
          #
          # If not set, the file is assumed to be named "account_renewed.html".
          #
          #account_renewed_html_path: "account_renewed.html"

          # A custom file name for the 'invalid_token.html' template.
          #
          # If not set, the file is assumed to be named "invalid_token.html".
          #
          #invalid_token_html_path: "invalid_token.html"
        """
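The "[now + period - d ; now + period]" wording in the config comment above is what account_validity_startup_job_max_delta implements (d = 10% of the period). A rough sketch of how such an expiration date might be picked at startup — the helper name and values are illustrative, not Synapse's actual implementation:

    import random
    import time

    period_ms = 6 * 7 * 24 * 3600 * 1000     # "6w" parsed to milliseconds
    max_delta_ms = period_ms * 10.0 / 100.0  # d = 10% of the validity period

    def pick_expiration_ts(now_ms: int) -> int:
        # Uniformly pick within [now + period - d, now + period] so that a mass
        # backfill at startup doesn't make every account expire at the same instant.
        return int(now_ms + period_ms - random.uniform(0, max_delta_ms))

    print(pick_expiration_ts(int(time.time() * 1000)))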
synapse/config/api.py
@@ -88,10 +88,6 @@ class ApiConfig(Config):
        if not room_prejoin_state_config.get("disable_default_event_types"):
            yield from _DEFAULT_PREJOIN_STATE_TYPES

        if self.spaces_enabled:
            # MSC1772 suggests adding m.room.create to the prejoin state
            yield EventTypes.Create

        yield from room_prejoin_state_config.get("additional_event_types", [])


@@ -109,6 +105,8 @@ _DEFAULT_PREJOIN_STATE_TYPES = [
    EventTypes.RoomAvatar,
    EventTypes.RoomEncryption,
    EventTypes.Name,
    # Per MSC1772.
    EventTypes.Create,
]


synapse/config/cache.py
@@ -17,6 +17,8 @@ import re
import threading
from typing import Callable, Dict

from synapse.python_dependencies import DependencyException, check_requirements

from ._base import Config, ConfigError

# The prefix for all cache factor-related environment variables
@@ -189,6 +191,15 @@ class CacheConfig(Config):
                )
            self.cache_factors[cache] = factor

        self.track_memory_usage = cache_config.get("track_memory_usage", False)
        if self.track_memory_usage:
            try:
                check_requirements("cache_memory")
            except DependencyException as e:
                raise ConfigError(
                    e.message  # noqa: B306, DependencyException.message is a property
                )

        # Resize all caches (if necessary) with the new factors we've loaded
        self.resize_all_caches()

synapse/config/database.py
@@ -58,6 +58,7 @@ DEFAULT_CONFIG = """\
#    password: secretpassword
#    database: synapse
#    host: localhost
#    port: 5432
#    cp_min: 5
#    cp_max: 10
#

synapse/config/emailconfig.py
@@ -299,7 +299,7 @@ class EmailConfig(Config):
            "client_base_url", email_config.get("riot_base_url", None)
        )

        if self.account_validity.renew_by_email_enabled:
        if self.account_validity_renew_by_email_enabled:
            expiry_template_html = email_config.get(
                "expiry_template_html", "notice_expiry.html"
            )

synapse/config/experimental.py
@@ -29,9 +29,26 @@ class ExperimentalConfig(Config):
        self.msc2858_enabled = experimental.get("msc2858_enabled", False)  # type: bool

        # Spaces (MSC1772, MSC2946, MSC3083, etc)
        self.spaces_enabled = experimental.get("spaces_enabled", False)  # type: bool
        self.spaces_enabled = experimental.get("spaces_enabled", True)  # type: bool
        if self.spaces_enabled:
            KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083

        # MSC3026 (busy presence state)
        self.msc3026_enabled = experimental.get("msc3026_enabled", False)  # type: bool

    def generate_config_section(self, **kwargs):
        return """\
        # Enable experimental features in Synapse.
        #
        # Experimental features might break or be removed without a deprecation
        # period.
        #
        experimental_features:
          # Support for Spaces (MSC1772), it enables the following:
          #
          # * The Spaces Summary API (MSC2946).
          # * Restricting room membership based on space membership (MSC3083).
          #
          # Uncomment to disable support for Spaces.
          #spaces_enabled: false
        """

synapse/config/federation.py
@@ -44,6 +44,10 @@ class FederationConfig(Config):
            "allow_profile_lookup_over_federation", True
        )

        self.allow_device_name_lookup_over_federation = config.get(
            "allow_device_name_lookup_over_federation", True
        )

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
        ## Federation ##
@@ -75,6 +79,12 @@ class FederationConfig(Config):
        # on this homeserver. Defaults to 'true'.
        #
        #allow_profile_lookup_over_federation: false

        # Uncomment to disable device display name lookup over federation. By default, the
        # Federation API allows other homeservers to obtain device display names of any user
        # on this homeserver. Defaults to 'true'.
        #
        #allow_device_name_lookup_over_federation: false
        """


synapse/config/homeserver.py
@@ -12,25 +12,25 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import RootConfig
from .account_validity import AccountValidityConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .auth import AuthConfig
from .cache import CacheConfig
from .captcha import CaptchaConfig
from .cas import CasConfig
from .consent_config import ConsentConfig
from .consent import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .experimental import ExperimentalConfig
from .federation import FederationConfig
from .groups import GroupsConfig
from .jwt_config import JWTConfig
from .jwt import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
from .oidc_config import OIDCConfig
from .oidc import OIDCConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .push import PushConfig
from .ratelimiting import RatelimitConfig
@@ -39,9 +39,9 @@ from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2_config import SAML2Config
from .saml2 import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .server_notices import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .sso import SSOConfig
from .stats import StatsConfig
@@ -57,7 +57,6 @@ class HomeServerConfig(RootConfig):

    config_classes = [
        ServerConfig,
        ExperimentalConfig,
        TlsConfig,
        FederationConfig,
        CacheConfig,
@@ -68,6 +67,7 @@ class HomeServerConfig(RootConfig):
        CaptchaConfig,
        VoipConfig,
        RegistrationConfig,
        AccountValidityConfig,
        MetricsConfig,
        ApiConfig,
        AppServiceConfig,
@@ -93,4 +93,5 @@ class HomeServerConfig(RootConfig):
        TracerConfig,
        WorkerConfig,
        RedisConfig,
        ExperimentalConfig,
    ]

synapse/config/logger.py
@@ -31,7 +31,6 @@ from twisted.logger import (
)

import synapse
from synapse.app import _base as appbase
from synapse.logging._structured import setup_structured_logging
from synapse.logging.context import LoggingContextFilter
from synapse.logging.filter import MetadataFilter
@@ -318,6 +317,8 @@ def setup_logging(
    # Perform one-time logging configuration.
    _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner)
    # Add a SIGHUP handler to reload the logging configuration, if one is available.
    from synapse.app import _base as appbase

    appbase.register_sighup(_reload_logging_config, log_config_path)

    # Log immediately so we can grep backwards.

synapse/config/oidc.py (renamed from oidc_config.py)
@@ -14,20 +14,23 @@
# limitations under the License.

from collections import Counter
from typing import Iterable, List, Mapping, Optional, Tuple, Type
from typing import Collection, Iterable, List, Mapping, Optional, Tuple, Type

import attr

from synapse.config._util import validate_config
from synapse.config.sso import SsoAttributeRequirement
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.types import Collection, JsonDict
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri

from ._base import Config, ConfigError, read_file

DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
# The module that JinjaOidcMappingProvider is in was renamed, we want to
# transparently handle both the same.
LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"


class OIDCConfig(Config):
@@ -403,6 +406,8 @@ def _parse_oidc_config_dict(
    """
    ump_config = oidc_config.get("user_mapping_provider", {})
    ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
    if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER:
        ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER
    ump_config.setdefault("config", {})

    (
synapse/config/registration.py
@@ -12,74 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pkg_resources

from synapse.api.constants import RoomCreationPreset
from synapse.config._base import Config, ConfigError
from synapse.types import RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols, strtobool


class AccountValidityConfig(Config):
    section = "accountvalidity"

    def __init__(self, config, synapse_config):
        if config is None:
            return
        super().__init__()
        self.enabled = config.get("enabled", False)
        self.renew_by_email_enabled = "renew_at" in config

        if self.enabled:
            if "period" in config:
                self.period = self.parse_duration(config["period"])
            else:
                raise ConfigError("'period' is required when using account validity")

            if "renew_at" in config:
                self.renew_at = self.parse_duration(config["renew_at"])

            if "renew_email_subject" in config:
                self.renew_email_subject = config["renew_email_subject"]
            else:
                self.renew_email_subject = "Renew your %(app)s account"

            self.startup_job_max_delta = self.period * 10.0 / 100.0

        if self.renew_by_email_enabled:
            if "public_baseurl" not in synapse_config:
                raise ConfigError("Can't send renewal emails without 'public_baseurl'")

        template_dir = config.get("template_dir")

        if not template_dir:
            template_dir = pkg_resources.resource_filename("synapse", "res/templates")

        if "account_renewed_html_path" in config:
            file_path = os.path.join(template_dir, config["account_renewed_html_path"])

            self.account_renewed_html_content = self.read_file(
                file_path, "account_validity.account_renewed_html_path"
            )
        else:
            self.account_renewed_html_content = (
                "<html><body>Your account has been successfully renewed.</body><html>"
            )

        if "invalid_token_html_path" in config:
            file_path = os.path.join(template_dir, config["invalid_token_html_path"])

            self.invalid_token_html_content = self.read_file(
                file_path, "account_validity.invalid_token_html_path"
            )
        else:
            self.invalid_token_html_content = (
                "<html><body>Invalid renewal token.</body><html>"
            )


class RegistrationConfig(Config):
    section = "registration"

@@ -92,10 +30,6 @@ class RegistrationConfig(Config):
            str(config["disable_registration"])
        )

        self.account_validity = AccountValidityConfig(
            config.get("account_validity") or {}, config
        )

        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
        self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
@@ -207,69 +141,6 @@ class RegistrationConfig(Config):
        #
        #enable_registration: false

        # Optional account validity configuration. This allows for accounts to be denied
        # any request after a given period.
        #
        # Once this feature is enabled, Synapse will look for registered users without an
        # expiration date at startup and will add one to every account it found using the
        # current settings at that time.
        # This means that, if a validity period is set, and Synapse is restarted (it will
        # then derive an expiration date from the current validity period), and some time
        # after that the validity period changes and Synapse is restarted, the users'
        # expiration dates won't be updated unless their account is manually renewed. This
        # date will be randomly selected within a range [now + period - d ; now + period],
        # where d is equal to 10%% of the validity period.
        #
        account_validity:
          # The account validity feature is disabled by default. Uncomment the
          # following line to enable it.
          #
          #enabled: true

          # The period after which an account is valid after its registration. When
          # renewing the account, its validity period will be extended by this amount
          # of time. This parameter is required when using the account validity
          # feature.
          #
          #period: 6w

          # The amount of time before an account's expiry date at which Synapse will
          # send an email to the account's email address with a renewal link. By
          # default, no such emails are sent.
          #
          # If you enable this setting, you will also need to fill out the 'email' and
          # 'public_baseurl' configuration sections.
          #
          #renew_at: 1w

          # The subject of the email sent out with the renewal link. '%%(app)s' can be
          # used as a placeholder for the 'app_name' parameter from the 'email'
          # section.
          #
          # Note that the placeholder must be written '%%(app)s', including the
          # trailing 's'.
          #
          # If this is not set, a default value is used.
          #
          #renew_email_subject: "Renew your %%(app)s account"

          # Directory in which Synapse will try to find templates for the HTML files to
          # serve to the user when trying to renew an account. If not set, default
          # templates from within the Synapse package will be used.
          #
          #template_dir: "res/templates"

          # File within 'template_dir' giving the HTML to be displayed to the user after
          # they successfully renewed their account. If not set, default text is used.
          #
          #account_renewed_html_path: "account_renewed.html"

          # File within 'template_dir' giving the HTML to be displayed when the user
          # tries to renew an account with an invalid renewal token. If not set,
          # default text is used.
          #
          #invalid_token_html_path: "invalid_token.html"

        # Time that a user's session remains valid for, after they log in.
        #
        # Note that this is not currently compatible with guest logins.
@@ -478,4 +349,4 @@ class RegistrationConfig(Config):

    def read_arguments(self, args):
        if args.enable_registration is not None:
            self.enable_registration = bool(strtobool(str(args.enable_registration)))
            self.enable_registration = strtobool(str(args.enable_registration))

synapse/config/saml2.py (renamed from saml2_config.py)
@@ -25,7 +25,10 @@ from ._util import validate_config

logger = logging.getLogger(__name__)

DEFAULT_USER_MAPPING_PROVIDER = (
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.saml.DefaultSamlMappingProvider"
# The module that DefaultSamlMappingProvider is in was renamed, we want to
# transparently handle both the same.
LEGACY_USER_MAPPING_PROVIDER = (
    "synapse.handlers.saml_handler.DefaultSamlMappingProvider"
)

@@ -97,6 +100,8 @@ class SAML2Config(Config):

        # Use the default user mapping provider if not set
        ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
        if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER:
            ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER

        # Ensure a config is present
        ump_dict["config"] = ump_dict.get("config") or {}
@@ -159,7 +164,13 @@ class SAML2Config(Config):
        config_path = saml2_config.get("config_path", None)
        if config_path is not None:
            mod = load_python_module(config_path)
            _dict_merge(merge_dict=mod.CONFIG, into_dict=saml2_config_dict)
            config = getattr(mod, "CONFIG", None)
            if config is None:
                raise ConfigError(
                    "Config path specified by saml2_config.config_path does not "
                    "have a CONFIG property."
                )
            _dict_merge(merge_dict=config, into_dict=saml2_config_dict)

        import saml2.config

synapse/config/server.py
@@ -19,7 +19,7 @@ import logging
import os.path
import re
from textwrap import indent
from typing import Any, Dict, Iterable, List, Optional, Set
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple

import attr
import yaml
@@ -235,7 +235,11 @@ class ServerConfig(Config):
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)

        self.public_baseurl = config.get("public_baseurl")
        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != "/":
                self.public_baseurl += "/"

        # Whether to enable user presence.
        presence_config = config.get("presence") or {}
@@ -407,10 +411,6 @@ class ServerConfig(Config):
            config_path=("federation_ip_range_blacklist",),
        )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != "/":
                self.public_baseurl += "/"

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
@@ -572,6 +572,7 @@ class ServerConfig(Config):
            _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
        self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))

        @attr.s
        class LimitRemoteRoomsConfig:
@@ -917,6 +918,16 @@ class ServerConfig(Config):
        #
        #gc_thresholds: [700, 10, 10]

        # The minimum time in seconds between each GC for a generation, regardless of
        # the GC thresholds. This ensures that we don't do GC too frequently.
        #
        # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
        # generation 0 GCs, etc.
        #
        # Defaults to `[1s, 10s, 30s]`.
        #
        #gc_min_interval: [0.5s, 30s, 1m]

        # Set the limit on the returned events in the timeline in the get
        # and sync operations. The default value is 100. -1 means no upper limit.
        #
@@ -1305,6 +1316,24 @@ class ServerConfig(Config):
            help="Turn on the twisted telnet manhole service on the given port.",
        )

    def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]:
        """Reads the three durations for the GC min interval option, returning seconds."""
        if durations is None:
            return None

        try:
            if len(durations) != 3:
                raise ValueError()
            return (
                self.parse_duration(durations[0]) / 1000,
                self.parse_duration(durations[1]) / 1000,
                self.parse_duration(durations[2]) / 1000,
            )
        except Exception:
            raise ConfigError(
                "Value of `gc_min_interval` must be a list of three durations if set"
            )
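To see what read_gc_intervals yields: it converts three duration strings into seconds. A self-contained approximation — this parse_duration is a simplified stand-in for Synapse's Config.parse_duration, which returns milliseconds:

    from typing import Optional, Tuple

    def parse_duration(value: str) -> int:
        # Simplified stand-in: supports "500ms", "0.5s", "30s", "1m"; returns ms.
        if value.endswith("ms"):
            return int(float(value[:-2]))
        if value.endswith("s"):
            return int(float(value[:-1]) * 1000)
        if value.endswith("m"):
            return int(float(value[:-1]) * 60 * 1000)
        raise ValueError(value)

    def read_gc_intervals(durations) -> Optional[Tuple[float, float, float]]:
        if durations is None:
            return None
        if len(durations) != 3:
            raise ValueError("gc_min_interval must be a list of three durations")
        return tuple(parse_duration(d) / 1000 for d in durations)

    print(read_gc_intervals(["0.5s", "30s", "1m"]))  # (0.5, 30.0, 60.0)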


def is_threepid_reserved(reserved_threepids, threepid):
    """Check the threepid against the reserved threepid config

@@ -16,10 +16,7 @@ import logging
|
||||
import os
|
||||
import warnings
|
||||
from datetime import datetime
|
||||
from hashlib import sha256
|
||||
from typing import List, Optional
|
||||
|
||||
from unpaddedbase64 import encode_base64
|
||||
from typing import List, Optional, Pattern
|
||||
|
||||
from OpenSSL import SSL, crypto
|
||||
from twisted.internet._sslverify import Certificate, trustRootFromCertificates
|
||||
@@ -83,13 +80,6 @@ class TlsConfig(Config):
|
||||
"configured."
|
||||
)
|
||||
|
||||
self._original_tls_fingerprints = config.get("tls_fingerprints", [])
|
||||
|
||||
if self._original_tls_fingerprints is None:
|
||||
self._original_tls_fingerprints = []
|
||||
|
||||
self.tls_fingerprints = list(self._original_tls_fingerprints)
|
||||
|
||||
# Whether to verify certificates on outbound federation traffic
|
||||
self.federation_verify_certificates = config.get(
|
||||
"federation_verify_certificates", True
|
||||
@@ -124,7 +114,7 @@ class TlsConfig(Config):
|
||||
fed_whitelist_entries = []
|
||||
|
||||
# Support globs (*) in whitelist values
|
||||
self.federation_certificate_verification_whitelist = [] # type: List[str]
|
||||
self.federation_certificate_verification_whitelist = [] # type: List[Pattern]
|
||||
for entry in fed_whitelist_entries:
|
||||
try:
|
||||
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
|
||||
@@ -248,19 +238,6 @@ class TlsConfig(Config):
|
||||
e,
|
||||
)
|
||||
|
||||
self.tls_fingerprints = list(self._original_tls_fingerprints)
|
||||
|
||||
if self.tls_certificate:
|
||||
# Check that our own certificate is included in the list of fingerprints
|
||||
# and include it if it is not.
|
||||
x509_certificate_bytes = crypto.dump_certificate(
|
||||
crypto.FILETYPE_ASN1, self.tls_certificate
|
||||
)
|
||||
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
|
||||
sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
|
||||
if sha256_fingerprint not in sha256_fingerprints:
|
||||
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
|
||||
|
||||
def generate_config_section(
|
||||
self,
|
||||
config_dir_path,
|
||||
@@ -443,33 +420,6 @@ class TlsConfig(Config):
|
||||
# If unspecified, we will use CONFDIR/client.key.
|
||||
#
|
||||
account_key_file: %(default_acme_account_file)s
|
||||
|
||||
# List of allowed TLS fingerprints for this server to publish along
|
||||
# with the signing keys for this server. Other matrix servers that
|
||||
# make HTTPS requests to this server will check that the TLS
|
||||
# certificates returned by this server match one of the fingerprints.
|
||||
#
|
||||
# Synapse automatically adds the fingerprint of its own certificate
|
||||
# to the list. So if federation traffic is handled directly by synapse
|
||||
# then no modification to the list is required.
|
||||
#
|
||||
# If synapse is run behind a load balancer that handles the TLS then it
|
||||
# will be necessary to add the fingerprints of the certificates used by
|
||||
# the load balancers to this list if they are different from the one
|
||||
# synapse is using.
|
||||
#
|
||||
# Homeservers are permitted to cache the list of TLS fingerprints
|
||||
# returned in the key responses up to the "valid_until_ts" returned in
|
||||
# key. It may be necessary to publish the fingerprints of a new
|
||||
# certificate and wait until the "valid_until_ts" of the previous key
|
||||
# responses have passed before deploying it.
|
||||
#
|
||||
# You can calculate a fingerprint from a given TLS listener via:
|
||||
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
|
||||
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
|
||||
# or by checking matrix.org/federationtester/api/report?server_name=$host
|
||||
#
|
||||
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
|
||||
"""
|
||||
# Lowercase the string representation of boolean values
|
||||
% {
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Set
|
||||
|
||||
from synapse.python_dependencies import DependencyException, check_requirements
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
@@ -32,6 +34,8 @@ class TracerConfig(Config):
|
||||
{"sampler": {"type": "const", "param": 1}, "logging": False},
|
||||
)
|
||||
|
||||
self.force_tracing_for_users: Set[str] = set()
|
||||
|
||||
if not self.opentracer_enabled:
|
||||
return
|
||||
|
||||
@@ -48,6 +52,19 @@ class TracerConfig(Config):
|
||||
if not isinstance(self.opentracer_whitelist, list):
|
||||
raise ConfigError("Tracer homeserver_whitelist config is malformed")
|
||||
|
||||
force_tracing_for_users = opentracing_config.get("force_tracing_for_users", [])
|
||||
if not isinstance(force_tracing_for_users, list):
|
||||
raise ConfigError(
|
||||
"Expected a list", ("opentracing", "force_tracing_for_users")
|
||||
)
|
||||
for i, u in enumerate(force_tracing_for_users):
|
||||
if not isinstance(u, str):
|
||||
raise ConfigError(
|
||||
"Expected a string",
|
||||
("opentracing", "force_tracing_for_users", f"index {i}"),
|
||||
)
|
||||
self.force_tracing_for_users.add(u)
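For reference, a minimal sketch of an opentracing section that passes the validation above (values are illustrative):

    # Hypothetical dict as it would appear after YAML parsing.
    opentracing_config = {
        "enabled": True,
        "homeserver_whitelist": [".*"],
        "force_tracing_for_users": ["@admin:example.com"],
    }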
|
||||
|
||||
def generate_config_section(cls, **kwargs):
|
||||
return """\
|
||||
## Opentracing ##
|
||||
@@ -64,7 +81,8 @@ class TracerConfig(Config):
|
||||
#enabled: true
|
||||
|
||||
# The list of homeservers with which we wish to exchange span contexts and span baggage.
|
||||
# See docs/opentracing.rst
|
||||
# See docs/opentracing.rst.
|
||||
#
|
||||
# This is a list of regexes which are matched against the server_name of the
|
||||
# homeserver.
|
||||
#
|
||||
@@ -73,19 +91,26 @@ class TracerConfig(Config):
|
||||
#homeserver_whitelist:
|
||||
# - ".*"
|
||||
|
||||
# A list of the matrix IDs of users whose requests will always be traced,
|
||||
# even if the tracing system would otherwise drop the traces due to
|
||||
# probabilistic sampling.
|
||||
#
|
||||
# By default, the list is empty.
|
||||
#
|
||||
#force_tracing_for_users:
|
||||
# - "@user1:server_name"
|
||||
# - "@user2:server_name"
|
||||
|
||||
# Jaeger can be configured to sample traces at different rates.
|
||||
# All configuration options provided by Jaeger can be set here.
|
||||
# Jaeger's configuration mostly related to trace sampling which
|
||||
# Jaeger's configuration is mostly related to trace sampling which
|
||||
# is documented here:
|
||||
# https://www.jaegertracing.io/docs/1.13/sampling/.
|
||||
# https://www.jaegertracing.io/docs/latest/sampling/.
|
||||
#
|
||||
#jaeger_config:
|
||||
# sampler:
|
||||
# type: const
|
||||
# param: 1
|
||||
|
||||
# Logging whether spans were started and reported
|
||||
#
|
||||
# logging:
|
||||
# false
|
||||
"""
|
||||
|
||||
@@ -64,6 +64,14 @@ class WriterLocations:
|
||||
Attributes:
|
||||
events: The instances that write to the event and backfill streams.
|
||||
typing: The instance that writes to the typing stream.
|
||||
to_device: The instances that write to the to_device stream. Currently
|
||||
can only be a single instance.
|
||||
account_data: The instances that write to the account data streams. Currently
|
||||
can only be a single instance.
|
||||
receipts: The instances that write to the receipts stream. Currently
|
||||
can only be a single instance.
|
||||
presence: The instances that write to the presence stream. Currently
|
||||
can only be a single instance.
|
||||
"""
|
||||
|
||||
events = attr.ib(
|
||||
@@ -85,6 +93,11 @@ class WriterLocations:
|
||||
type=List[str],
|
||||
converter=_instance_to_list_converter,
|
||||
)
|
||||
presence = attr.ib(
|
||||
default=["master"],
|
||||
type=List[str],
|
||||
converter=_instance_to_list_converter,
|
||||
)
|
||||
|
||||
|
||||
class WorkerConfig(Config):
|
||||
@@ -188,7 +201,14 @@ class WorkerConfig(Config):
|
||||
|
||||
# Check that the configured writers for events and typing also appear in
|
||||
# `instance_map`.
|
||||
for stream in ("events", "typing", "to_device", "account_data", "receipts"):
|
||||
for stream in (
|
||||
"events",
|
||||
"typing",
|
||||
"to_device",
|
||||
"account_data",
|
||||
"receipts",
|
||||
"presence",
|
||||
):
|
||||
instances = _instance_to_list_converter(getattr(self.writers, stream))
|
||||
for instance in instances:
|
||||
if instance != "master" and instance not in self.instance_map:
|
||||
@@ -215,6 +235,11 @@ class WorkerConfig(Config):
|
||||
if len(self.writers.events) == 0:
|
||||
raise ConfigError("Must specify at least one instance to handle `events`.")
|
||||
|
||||
if len(self.writers.presence) != 1:
|
||||
raise ConfigError(
|
||||
"Must only specify one instance to handle `presence` messages."
|
||||
)
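A sketch of a parsed worker configuration that satisfies both checks (instance names are hypothetical): every listed writer must appear in `instance_map`, and `presence` must name exactly one instance.

    config = {
        "stream_writers": {
            "events": ["worker1", "worker2"],
            "presence": ["worker1"],
        },
        "instance_map": {
            "worker1": {"host": "localhost", "port": 9091},
            "worker2": {"host": "localhost", "port": 9092},
        },
    }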
|
||||
|
||||
self.events_shard_config = RoutableShardedWorkerHandlingConfig(
|
||||
self.writers.events
|
||||
)
|
||||
|
||||
@@ -17,7 +17,7 @@ import abc
|
||||
import logging
|
||||
import urllib
|
||||
from collections import defaultdict
|
||||
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
|
||||
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
import attr
|
||||
from signedjson.key import (
|
||||
@@ -42,6 +42,8 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.config.key import TrustedKeyServer
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.utils import prune_event_dict
|
||||
from synapse.logging.context import (
|
||||
PreserveLoggingContext,
|
||||
make_deferred_yieldable,
|
||||
@@ -69,7 +71,11 @@ class VerifyJsonRequest:
|
||||
Attributes:
|
||||
server_name: The name of the server to verify against.
|
||||
|
||||
json_object: The JSON object to verify.
|
||||
get_json_object: A callback to fetch the JSON object to verify.
|
||||
A callback is used to allow deferring the creation of the JSON
|
||||
object to verify until needed, e.g. for events we can defer
|
||||
creating the redacted copy. This reduces the memory usage when
|
||||
there are large numbers of in-flight requests.
|
||||
|
||||
minimum_valid_until_ts: time at which we require the signing key to
|
||||
be valid. (0 implies we don't care)
|
||||
@@ -88,14 +94,50 @@ class VerifyJsonRequest:
|
||||
"""
|
||||
|
||||
server_name = attr.ib(type=str)
|
||||
json_object = attr.ib(type=JsonDict)
|
||||
get_json_object = attr.ib(type=Callable[[], JsonDict])
|
||||
minimum_valid_until_ts = attr.ib(type=int)
|
||||
request_name = attr.ib(type=str)
|
||||
key_ids = attr.ib(init=False, type=List[str])
|
||||
key_ids = attr.ib(type=List[str])
|
||||
key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred)
|
||||
|
||||
def __attrs_post_init__(self):
|
||||
self.key_ids = signature_ids(self.json_object, self.server_name)
|
||||
@staticmethod
|
||||
def from_json_object(
|
||||
server_name: str,
|
||||
json_object: JsonDict,
|
||||
minimum_valid_until_ms: int,
|
||||
request_name: str,
|
||||
):
|
||||
"""Create a VerifyJsonRequest to verify all signatures on a signed JSON
|
||||
object for the given server.
|
||||
"""
|
||||
key_ids = signature_ids(json_object, server_name)
|
||||
return VerifyJsonRequest(
|
||||
server_name,
|
||||
lambda: json_object,
|
||||
minimum_valid_until_ms,
|
||||
request_name=request_name,
|
||||
key_ids=key_ids,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_event(
|
||||
server_name: str,
|
||||
event: EventBase,
|
||||
minimum_valid_until_ms: int,
|
||||
):
|
||||
"""Create a VerifyJsonRequest to verify all signatures on an event
|
||||
object for the given server.
|
||||
"""
|
||||
key_ids = list(event.signatures.get(server_name, []))
|
||||
return VerifyJsonRequest(
|
||||
server_name,
|
||||
# We defer creating the redacted json object, as it uses a lot more
|
||||
# memory than the Event object itself.
|
||||
lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
|
||||
minimum_valid_until_ms,
|
||||
request_name=event.event_id,
|
||||
key_ids=key_ids,
|
||||
)
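The two constructors differ mainly in when the JSON to verify is built. A short sketch (assumes `event` is an `EventBase` signed by example.com):

    request = VerifyJsonRequest.from_event("example.com", event, minimum_valid_until_ms=0)
    # The redacted PDU JSON is only built when the verifier asks for it:
    json_to_verify = request.get_json_object()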
|
||||
|
||||
|
||||
class KeyLookupError(ValueError):
|
||||
@@ -147,8 +189,13 @@ class Keyring:
|
||||
Deferred[None]: completes if the object was correctly signed, otherwise
|
||||
errbacks with an error
|
||||
"""
|
||||
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
|
||||
requests = (req,)
|
||||
request = VerifyJsonRequest.from_json_object(
|
||||
server_name,
|
||||
json_object,
|
||||
validity_time,
|
||||
request_name,
|
||||
)
|
||||
requests = (request,)
|
||||
return make_deferred_yieldable(self._verify_objects(requests)[0])
|
||||
|
||||
def verify_json_objects_for_server(
|
||||
@@ -175,10 +222,41 @@ class Keyring:
|
||||
logcontext.
|
||||
"""
|
||||
return self._verify_objects(
|
||||
VerifyJsonRequest(server_name, json_object, validity_time, request_name)
|
||||
VerifyJsonRequest.from_json_object(
|
||||
server_name, json_object, validity_time, request_name
|
||||
)
|
||||
for server_name, json_object, validity_time, request_name in server_and_json
|
||||
)
|
||||
|
||||
def verify_events_for_server(
|
||||
self, server_and_events: Iterable[Tuple[str, EventBase, int]]
|
||||
) -> List[defer.Deferred]:
|
||||
"""Bulk verification of signatures on events.
|
||||
|
||||
Args:
|
||||
server_and_events:
|
||||
Iterable of `(server_name, event, validity_time)` tuples.
|
||||
|
||||
`server_name` is which server we are verifying the signature for
|
||||
on the event.
|
||||
|
||||
`event` is the event whose signatures we will verify for
the given `server_name`.
|
||||
|
||||
`validity_time` is a timestamp at which the signing key must be
|
||||
valid.
|
||||
|
||||
Returns:
|
||||
List<Deferred[None]>: for each input triplet, a deferred indicating success
|
||||
or failure to verify each event's signature for the given
|
||||
server_name. The deferreds run their callbacks in the sentinel
|
||||
logcontext.
|
||||
"""
|
||||
return self._verify_objects(
|
||||
VerifyJsonRequest.from_event(server_name, event, validity_time)
|
||||
for server_name, event, validity_time in server_and_events
|
||||
)
|
||||
|
||||
def _verify_objects(
|
||||
self, verify_requests: Iterable[VerifyJsonRequest]
|
||||
) -> List[defer.Deferred]:
|
||||
@@ -892,7 +970,7 @@ async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None:
|
||||
with PreserveLoggingContext():
|
||||
_, key_id, verify_key = await verify_request.key_ready
|
||||
|
||||
json_object = verify_request.json_object
|
||||
json_object = verify_request.get_json_object()
|
||||
|
||||
try:
|
||||
verify_signed_json(json_object, server_name, verify_key)
|
||||
|
||||
@@ -14,14 +14,14 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import List, Optional, Set, Tuple
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
from signedjson.sign import SignatureVerifyException, verify_signed_json
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
from synapse.api.constants import EventTypes, JoinRules, Membership
|
||||
from synapse.api.constants import MAX_PDU_SIZE, EventTypes, JoinRules, Membership
|
||||
from synapse.api.errors import AuthError, EventSizeError, SynapseError
|
||||
from synapse.api.room_versions import (
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
@@ -205,7 +205,7 @@ def _check_size_limits(event: EventBase) -> None:
|
||||
too_big("type")
|
||||
if len(event.event_id) > 255:
|
||||
too_big("event_id")
|
||||
if len(encode_canonical_json(event.get_pdu_json())) > 65536:
|
||||
if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
|
||||
too_big("event")
|
||||
|
||||
|
||||
@@ -670,7 +670,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase
|
||||
public_key = public_key_object["public_key"]
|
||||
try:
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
for key_name in signature_block.keys():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
continue
|
||||
verify_key = decode_verify_key_bytes(
|
||||
@@ -688,7 +688,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase
|
||||
return False
|
||||
|
||||
|
||||
def get_public_keys(invite_event):
|
||||
def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
|
||||
public_keys = []
|
||||
if "public_key" in invite_event.content:
|
||||
o = {"public_key": invite_event.content["public_key"]}
|
||||
|
||||
@@ -15,12 +15,12 @@
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
||||
from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from synapse.rest.media.v1._base import FileInfo
|
||||
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
|
||||
from synapse.spam_checker_api import RegistrationBehaviour
|
||||
from synapse.types import Collection
|
||||
from synapse.types import RoomAlias
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -114,7 +114,9 @@ class SpamChecker:
|
||||
|
||||
return True
|
||||
|
||||
async def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
|
||||
async def user_may_create_room_alias(
|
||||
self, userid: str, room_alias: RoomAlias
|
||||
) -> bool:
|
||||
"""Checks if a given user may create a room alias
|
||||
|
||||
If this method returns false, the association request will be rejected.
|
||||
|
||||
@@ -137,11 +137,7 @@ class FederationBase:
|
||||
return deferreds
|
||||
|
||||
|
||||
class PduToCheckSig(
|
||||
namedtuple(
|
||||
"PduToCheckSig", ["pdu", "redacted_pdu_json", "sender_domain", "deferreds"]
|
||||
)
|
||||
):
|
||||
class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])):
|
||||
pass
|
||||
|
||||
|
||||
@@ -184,7 +180,6 @@ def _check_sigs_on_pdus(
|
||||
pdus_to_check = [
|
||||
PduToCheckSig(
|
||||
pdu=p,
|
||||
redacted_pdu_json=prune_event(p).get_pdu_json(),
|
||||
sender_domain=get_domain_from_id(p.sender),
|
||||
deferreds=[],
|
||||
)
|
||||
@@ -195,13 +190,12 @@ def _check_sigs_on_pdus(
|
||||
# (except if it's a 3pid invite, in which case it may be sent by any server)
|
||||
pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)]
|
||||
|
||||
more_deferreds = keyring.verify_json_objects_for_server(
|
||||
more_deferreds = keyring.verify_events_for_server(
|
||||
[
|
||||
(
|
||||
p.sender_domain,
|
||||
p.redacted_pdu_json,
|
||||
p.pdu,
|
||||
p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
|
||||
p.pdu.event_id,
|
||||
)
|
||||
for p in pdus_to_check_sender
|
||||
]
|
||||
@@ -230,13 +224,12 @@ def _check_sigs_on_pdus(
|
||||
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
|
||||
]
|
||||
|
||||
more_deferreds = keyring.verify_json_objects_for_server(
|
||||
more_deferreds = keyring.verify_events_for_server(
|
||||
[
|
||||
(
|
||||
get_domain_from_id(p.pdu.event_id),
|
||||
p.redacted_pdu_json,
|
||||
p.pdu,
|
||||
p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
|
||||
p.pdu.event_id,
|
||||
)
|
||||
for p in pdus_to_check_event_id
|
||||
]
|
||||
|
||||
@@ -55,6 +55,7 @@ from synapse.api.room_versions import (
|
||||
)
|
||||
from synapse.events import EventBase, builder
|
||||
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
||||
from synapse.federation.transport.client import SendJoinResponse
|
||||
from synapse.logging.context import make_deferred_yieldable, preserve_fn
|
||||
from synapse.logging.utils import log_function
|
||||
from synapse.types import JsonDict, get_domain_from_id
|
||||
@@ -451,6 +452,28 @@ class FederationClient(FederationBase):
|
||||
|
||||
return signed_auth
|
||||
|
||||
def _is_unknown_endpoint(
|
||||
self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
|
||||
) -> bool:
|
||||
"""
|
||||
Returns true if the response was due to an endpoint being unimplemented.
|
||||
|
||||
Args:
|
||||
e: The error response received from the remote server.
|
||||
synapse_error: The above error converted to a SynapseError. This is
|
||||
automatically generated if not provided.
|
||||
|
||||
"""
|
||||
if synapse_error is None:
|
||||
synapse_error = e.to_synapse_error()
|
||||
# There is no good way to detect an "unknown" endpoint.
|
||||
#
|
||||
# Dendrite returns a 404 (with no body); synapse returns a 400
|
||||
# with M_UNRECOGNISED.
|
||||
return e.code == 404 or (
|
||||
e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
|
||||
)
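Condensed, the heuristic treats two wire-level shapes as "endpoint not implemented"; a standalone sketch of the same rule (not part of the class):

    def looks_like_unknown_endpoint(code: int, errcode: str) -> bool:
        # 404 with no body (Dendrite) or 400 + M_UNRECOGNIZED (Synapse).
        return code == 404 or (code == 400 and errcode == "M_UNRECOGNIZED")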
|
||||
|
||||
async def _try_destination_list(
|
||||
self,
|
||||
description: str,
|
||||
@@ -468,9 +491,9 @@ class FederationClient(FederationBase):
|
||||
callback: Function to run for each server. Passed a single
|
||||
argument: the server_name to try.
|
||||
|
||||
If the callback raises a CodeMessageException with a 300/400 code,
|
||||
attempts to perform the operation stop immediately and the exception is
|
||||
reraised.
|
||||
If the callback raises a CodeMessageException with a 300/400 code or
|
||||
an UnsupportedRoomVersionError, attempts to perform the operation
|
||||
stop immediately and the exception is reraised.
|
||||
|
||||
Otherwise, if the callback raises an Exception the error is logged and the
|
||||
next server tried. Normally the stacktrace is logged but this is
|
||||
@@ -492,8 +515,7 @@ class FederationClient(FederationBase):
|
||||
continue
|
||||
|
||||
try:
|
||||
res = await callback(destination)
|
||||
return res
|
||||
return await callback(destination)
|
||||
except InvalidResponseError as e:
|
||||
logger.warning("Failed to %s via %s: %s", description, destination, e)
|
||||
except UnsupportedRoomVersionError:
|
||||
@@ -502,17 +524,15 @@ class FederationClient(FederationBase):
|
||||
synapse_error = e.to_synapse_error()
|
||||
failover = False
|
||||
|
||||
# Failover on an internal server error, or if the destination
|
||||
# doesn't implement the endpoint for some reason.
|
||||
if 500 <= e.code < 600:
|
||||
failover = True
|
||||
|
||||
elif failover_on_unknown_endpoint:
|
||||
# there is no good way to detect an "unknown" endpoint. Dendrite
|
||||
# returns a 404 (with no body); synapse returns a 400
|
||||
# with M_UNRECOGNISED.
|
||||
if e.code == 404 or (
|
||||
e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
|
||||
):
|
||||
failover = True
|
||||
elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
|
||||
e, synapse_error
|
||||
):
|
||||
failover = True
|
||||
|
||||
if not failover:
|
||||
raise synapse_error from e
|
||||
@@ -570,9 +590,8 @@ class FederationClient(FederationBase):
|
||||
UnsupportedRoomVersionError: if remote responds with
|
||||
a room version we don't understand.
|
||||
|
||||
SynapseError: if the chosen remote server returns a 300/400 code.
|
||||
|
||||
RuntimeError: if no servers were reachable.
|
||||
SynapseError: if the chosen remote server returns a 300/400 code, or
|
||||
no servers successfully handle the request.
|
||||
"""
|
||||
valid_memberships = {Membership.JOIN, Membership.LEAVE}
|
||||
if membership not in valid_memberships:
|
||||
@@ -642,25 +661,15 @@ class FederationClient(FederationBase):
|
||||
``auth_chain``.
|
||||
|
||||
Raises:
|
||||
SynapseError: if the chosen remote server returns a 300/400 code.
|
||||
|
||||
RuntimeError: if no servers were reachable.
|
||||
SynapseError: if the chosen remote server returns a 300/400 code, or
|
||||
no servers successfully handle the request.
|
||||
"""
|
||||
|
||||
async def send_request(destination) -> Dict[str, Any]:
|
||||
content = await self._do_send_join(destination, pdu)
|
||||
response = await self._do_send_join(room_version, destination, pdu)
|
||||
|
||||
logger.debug("Got content: %s", content)
|
||||
|
||||
state = [
|
||||
event_from_pdu_json(p, room_version, outlier=True)
|
||||
for p in content.get("state", [])
|
||||
]
|
||||
|
||||
auth_chain = [
|
||||
event_from_pdu_json(p, room_version, outlier=True)
|
||||
for p in content.get("auth_chain", [])
|
||||
]
|
||||
state = response.state
|
||||
auth_chain = response.auth_events
|
||||
|
||||
pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
|
||||
|
||||
@@ -673,7 +682,7 @@ class FederationClient(FederationBase):
|
||||
if create_event is None:
|
||||
# If the state doesn't have a create event then the room is
|
||||
# invalid, and it would fail auth checks anyway.
|
||||
raise SynapseError(400, "No create event in state")
|
||||
raise InvalidResponseError("No create event in state")
|
||||
|
||||
# the room version should be sane.
|
||||
create_room_version = create_event.content.get(
|
||||
@@ -735,41 +744,36 @@ class FederationClient(FederationBase):
|
||||
|
||||
return await self._try_destination_list("send_join", destinations, send_request)
|
||||
|
||||
async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict:
|
||||
async def _do_send_join(
|
||||
self, room_version: RoomVersion, destination: str, pdu: EventBase
|
||||
) -> SendJoinResponse:
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
try:
|
||||
return await self.transport_layer.send_join_v2(
|
||||
room_version=room_version,
|
||||
destination=destination,
|
||||
room_id=pdu.room_id,
|
||||
event_id=pdu.event_id,
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
if e.code in [400, 404]:
|
||||
err = e.to_synapse_error()
|
||||
|
||||
# If we receive an error response that isn't a generic error, or an
|
||||
# unrecognised endpoint error, we assume that the remote understands
|
||||
# the v2 invite API and this is a legitimate error.
|
||||
if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
|
||||
raise err
|
||||
else:
|
||||
raise e.to_synapse_error()
|
||||
# If an error is received that is due to an unrecognised endpoint,
# fall back to the v1 endpoint. Otherwise consider it a legitimate error
# and raise.
|
||||
if not self._is_unknown_endpoint(e):
|
||||
raise
|
||||
|
||||
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
|
||||
|
||||
resp = await self.transport_layer.send_join_v1(
|
||||
return await self.transport_layer.send_join_v1(
|
||||
room_version=room_version,
|
||||
destination=destination,
|
||||
room_id=pdu.room_id,
|
||||
event_id=pdu.event_id,
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
|
||||
# We expect the v1 API to respond with [200, content], so we only return the
|
||||
# content.
|
||||
return resp[1]
|
||||
|
||||
async def send_invite(
|
||||
self,
|
||||
destination: str,
|
||||
@@ -802,6 +806,11 @@ class FederationClient(FederationBase):
|
||||
|
||||
Returns:
|
||||
The event as a dict as returned by the remote server
|
||||
|
||||
Raises:
|
||||
SynapseError: if the remote server returns an error or if the server
|
||||
only supports the v1 endpoint and a room version other than "1"
|
||||
or "2" is requested.
|
||||
"""
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
@@ -817,28 +826,19 @@ class FederationClient(FederationBase):
|
||||
},
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
if e.code in [400, 404]:
|
||||
err = e.to_synapse_error()
|
||||
|
||||
# If we receive an error response that isn't a generic error, we
|
||||
# assume that the remote understands the v2 invite API and this
|
||||
# is a legitimate error.
|
||||
if err.errcode != Codes.UNKNOWN:
|
||||
raise err
|
||||
|
||||
# Otherwise, we assume that the remote server doesn't understand
|
||||
# the v2 invite API. That's ok provided the room uses old-style event
|
||||
# IDs.
|
||||
# If an error is received that is due to an unrecognised endpoint,
# fall back to the v1 endpoint if the room uses old-style event IDs.
# Otherwise consider it a legitimate error and raise.
|
||||
err = e.to_synapse_error()
|
||||
if self._is_unknown_endpoint(e, err):
|
||||
if room_version.event_format != EventFormatVersions.V1:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"User's homeserver does not support this room version",
|
||||
Codes.UNSUPPORTED_ROOM_VERSION,
|
||||
)
|
||||
elif e.code in (403, 429):
|
||||
raise e.to_synapse_error()
|
||||
else:
|
||||
raise
|
||||
raise err
|
||||
|
||||
# Didn't work, try v1 API.
|
||||
# Note the v1 API returns a tuple of `(200, content)`
|
||||
@@ -865,9 +865,8 @@ class FederationClient(FederationBase):
|
||||
pdu: event to be sent
|
||||
|
||||
Raises:
|
||||
SynapseError if the chosen remote server returns a 300/400 code.
|
||||
|
||||
RuntimeError if no servers were reachable.
|
||||
SynapseError: if the chosen remote server returns a 300/400 code, or
|
||||
no servers successfully handle the request.
|
||||
"""
|
||||
|
||||
async def send_request(destination: str) -> None:
|
||||
@@ -889,16 +888,11 @@ class FederationClient(FederationBase):
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
if e.code in [400, 404]:
|
||||
err = e.to_synapse_error()
|
||||
|
||||
# If we receive an error response that isn't a generic error, or an
|
||||
# unrecognised endpoint error, we assume that the remote understands
|
||||
# the v2 invite API and this is a legitimate error.
|
||||
if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
|
||||
raise err
|
||||
else:
|
||||
raise e.to_synapse_error()
|
||||
# If an error is received that is due to an unrecognised endpoint,
# fall back to the v1 endpoint. Otherwise consider it a legitimate error
# and raise.
|
||||
if not self._is_unknown_endpoint(e):
|
||||
raise
|
||||
|
||||
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
UnsupportedRoomVersionError,
|
||||
)
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
||||
@@ -865,14 +864,6 @@ class FederationHandlerRegistry:
|
||||
# EDU received.
|
||||
self._edu_type_to_instance = {} # type: Dict[str, List[str]]
|
||||
|
||||
# A rate limiter for incoming room key requests per origin.
|
||||
self._room_key_request_rate_limiter = Ratelimiter(
|
||||
store=hs.get_datastore(),
|
||||
clock=self.clock,
|
||||
rate_hz=self.config.rc_key_requests.per_second,
|
||||
burst_count=self.config.rc_key_requests.burst_count,
|
||||
)
|
||||
|
||||
def register_edu_handler(
|
||||
self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
|
||||
) -> None:
|
||||
@@ -926,16 +917,6 @@ class FederationHandlerRegistry:
|
||||
if not self.config.use_presence and edu_type == EduTypes.Presence:
|
||||
return
|
||||
|
||||
# If the incoming room key requests from a particular origin are over
|
||||
# the limit, drop them.
|
||||
if (
|
||||
edu_type == EduTypes.RoomKeyRequest
|
||||
and not await self._room_key_request_rate_limiter.can_do_action(
|
||||
None, origin
|
||||
)
|
||||
):
|
||||
return
|
||||
|
||||
# Check if we have a handler on this instance
|
||||
handler = self.edu_handlers.get(edu_type)
|
||||
if handler:
|
||||
|
||||
510
synapse/federation/send_queue.py
Normal file
510
synapse/federation/send_queue.py
Normal file
@@ -0,0 +1,510 @@
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""A federation sender that forwards things to be sent across replication to
|
||||
a worker process.
|
||||
|
||||
It assumes there is a single worker process feeding off of it.
|
||||
|
||||
Each row in the replication stream consists of a type and some json, where the
|
||||
types indicate whether they are presence, or edus, etc.
|
||||
|
||||
Ephemeral or non-event data are queued up in-memory. When the worker requests
|
||||
updates since a particular point, all in-memory data since before that point is
|
||||
dropped. We also expire things in the queue after 5 minutes, to ensure that a
|
||||
dead worker doesn't cause the queues to grow limitlessly.
|
||||
|
||||
Events are replicated via a separate events stream.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Dict,
|
||||
Hashable,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Sized,
|
||||
Tuple,
|
||||
Type,
|
||||
)
|
||||
|
||||
from sortedcontainers import SortedDict
|
||||
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.federation.sender import AbstractFederationSender, FederationSender
|
||||
from synapse.metrics import LaterGauge
|
||||
from synapse.replication.tcp.streams.federation import FederationStream
|
||||
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
from .units import Edu
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FederationRemoteSendQueue(AbstractFederationSender):
|
||||
"""A drop in replacement for FederationSender"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.server_name = hs.hostname
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
|
||||
# We may have multiple federation sender instances, so we need to track
|
||||
# their positions separately.
|
||||
self._sender_instances = hs.config.worker.federation_shard_config.instances
|
||||
self._sender_positions = {} # type: Dict[str, int]
|
||||
|
||||
# Pending presence map user_id -> UserPresenceState
|
||||
self.presence_map = {} # type: Dict[str, UserPresenceState]
|
||||
|
||||
# Stores the destinations we need to explicitly send presence to about a
|
||||
# given user.
|
||||
# Stream position -> (user_id, destinations)
|
||||
self.presence_destinations = (
|
||||
SortedDict()
|
||||
) # type: SortedDict[int, Tuple[str, Iterable[str]]]
|
||||
|
||||
# (destination, key) -> EDU
|
||||
self.keyed_edu = {} # type: Dict[Tuple[str, tuple], Edu]
|
||||
|
||||
# stream position -> (destination, key)
|
||||
self.keyed_edu_changed = (
|
||||
SortedDict()
|
||||
) # type: SortedDict[int, Tuple[str, tuple]]
|
||||
|
||||
self.edus = SortedDict() # type: SortedDict[int, Edu]
|
||||
|
||||
# stream ID for the next entry into keyed_edu_changed/edus.
|
||||
self.pos = 1
|
||||
|
||||
# map from stream ID to the time that stream entry was generated, so that we
|
||||
# can clear out entries after a while
|
||||
self.pos_time = SortedDict() # type: SortedDict[int, int]
|
||||
|
||||
# EVERYTHING IS SAD. In particular, python only makes new scopes when
|
||||
# we make a new function, so we need to make a new function so the inner
|
||||
# lambda binds to the queue rather than to the name of the queue which
|
||||
# changes. ARGH.
|
||||
def register(name: str, queue: Sized) -> None:
|
||||
LaterGauge(
|
||||
"synapse_federation_send_queue_%s_size" % (queue_name,),
|
||||
"",
|
||||
[],
|
||||
lambda: len(queue),
|
||||
)
|
||||
|
||||
for queue_name in [
|
||||
"presence_map",
|
||||
"keyed_edu",
|
||||
"keyed_edu_changed",
|
||||
"edus",
|
||||
"pos_time",
|
||||
"presence_destinations",
|
||||
]:
|
||||
register(queue_name, getattr(self, queue_name))
|
||||
|
||||
self.clock.looping_call(self._clear_queue, 30 * 1000)
|
||||
|
||||
def _next_pos(self) -> int:
|
||||
pos = self.pos
|
||||
self.pos += 1
|
||||
self.pos_time[self.clock.time_msec()] = pos
|
||||
return pos
|
||||
|
||||
def _clear_queue(self) -> None:
|
||||
"""Clear the queues for anything older than N minutes"""
|
||||
|
||||
FIVE_MINUTES_AGO = 5 * 60 * 1000
|
||||
now = self.clock.time_msec()
|
||||
|
||||
keys = self.pos_time.keys()
|
||||
time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
|
||||
if not keys[:time]:
|
||||
return
|
||||
|
||||
position_to_delete = max(keys[:time])
|
||||
for key in keys[:time]:
|
||||
del self.pos_time[key]
|
||||
|
||||
self._clear_queue_before_pos(position_to_delete)
|
||||
|
||||
def _clear_queue_before_pos(self, position_to_delete: int) -> None:
|
||||
"""Clear all the queues from before a given position"""
|
||||
with Measure(self.clock, "send_queue._clear"):
|
||||
# Delete things out of presence maps
|
||||
keys = self.presence_destinations.keys()
|
||||
i = self.presence_destinations.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.presence_destinations[key]
|
||||
|
||||
user_ids = {user_id for user_id, _ in self.presence_destinations.values()}
|
||||
|
||||
to_del = [
|
||||
user_id for user_id in self.presence_map if user_id not in user_ids
|
||||
]
|
||||
for user_id in to_del:
|
||||
del self.presence_map[user_id]
|
||||
|
||||
# Delete things out of keyed edus
|
||||
keys = self.keyed_edu_changed.keys()
|
||||
i = self.keyed_edu_changed.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.keyed_edu_changed[key]
|
||||
|
||||
live_keys = set()
|
||||
for edu_key in self.keyed_edu_changed.values():
|
||||
live_keys.add(edu_key)
|
||||
|
||||
keys_to_del = [
|
||||
edu_key for edu_key in self.keyed_edu if edu_key not in live_keys
|
||||
]
|
||||
for edu_key in keys_to_del:
|
||||
del self.keyed_edu[edu_key]
|
||||
|
||||
# Delete things out of edu map
|
||||
keys = self.edus.keys()
|
||||
i = self.edus.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.edus[key]
|
||||
|
||||
def notify_new_events(self, max_token: RoomStreamToken) -> None:
|
||||
"""As per FederationSender"""
|
||||
# This should never get called.
|
||||
raise NotImplementedError()
|
||||
|
||||
def build_and_send_edu(
|
||||
self,
|
||||
destination: str,
|
||||
edu_type: str,
|
||||
content: JsonDict,
|
||||
key: Optional[Hashable] = None,
|
||||
) -> None:
|
||||
"""As per FederationSender"""
|
||||
if destination == self.server_name:
|
||||
logger.info("Not sending EDU to ourselves")
|
||||
return
|
||||
|
||||
pos = self._next_pos()
|
||||
|
||||
edu = Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type=edu_type,
|
||||
content=content,
|
||||
)
|
||||
|
||||
if key:
|
||||
assert isinstance(key, tuple)
|
||||
self.keyed_edu[(destination, key)] = edu
|
||||
self.keyed_edu_changed[pos] = (destination, key)
|
||||
else:
|
||||
self.edus[pos] = edu
|
||||
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
|
||||
"""As per FederationSender
|
||||
|
||||
Args:
|
||||
receipt:
|
||||
"""
|
||||
# nothing to do here: the replication listener will handle it.
|
||||
|
||||
def send_presence_to_destinations(
|
||||
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
|
||||
) -> None:
|
||||
"""As per FederationSender
|
||||
|
||||
Args:
|
||||
states
|
||||
destinations
|
||||
"""
|
||||
for state in states:
|
||||
pos = self._next_pos()
|
||||
self.presence_map.update({state.user_id: state for state in states})
|
||||
self.presence_destinations[pos] = (state.user_id, destinations)
|
||||
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
def send_device_messages(self, destination: str) -> None:
|
||||
"""As per FederationSender"""
|
||||
# We don't need to replicate this as it gets sent down a different
|
||||
# stream.
|
||||
|
||||
def wake_destination(self, server: str) -> None:
|
||||
pass
|
||||
|
||||
def get_current_token(self) -> int:
|
||||
return self.pos - 1
|
||||
|
||||
def federation_ack(self, instance_name: str, token: int) -> None:
|
||||
if self._sender_instances:
|
||||
# If we have configured multiple federation sender instances we need
|
||||
# to track their positions separately, and only clear the queue up
|
||||
# to the token all instances have acked.
|
||||
self._sender_positions[instance_name] = token
|
||||
token = min(self._sender_positions.values())
|
||||
|
||||
self._clear_queue_before_pos(token)
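A sketch of the multi-instance behaviour (instance names are hypothetical): each ack records that sender's position, and the queue is only trimmed up to the minimum of all recorded positions.

    queue.federation_ack("fed2", 7)
    queue.federation_ack("fed1", 10)
    # positions == {"fed2": 7, "fed1": 10}; entries up to min(7, 10) == 7
    # have been cleared.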
|
||||
|
||||
async def get_replication_rows(
|
||||
self, instance_name: str, from_token: int, to_token: int, target_row_count: int
|
||||
) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
|
||||
"""Get rows to be sent over federation between the two tokens
|
||||
|
||||
Args:
|
||||
instance_name: the name of the current process
|
||||
from_token: the previous stream token: the starting point for fetching the
|
||||
updates
|
||||
to_token: the new stream token: the point to get updates up to
|
||||
target_row_count: a target for the number of rows to be returned.
|
||||
|
||||
Returns: a triplet `(updates, new_last_token, limited)`, where:
|
||||
* `updates` is a list of `(token, row)` entries.
|
||||
* `new_last_token` is the new position in stream.
|
||||
* `limited` is whether there are more updates to fetch.
|
||||
"""
|
||||
# TODO: Handle target_row_count.
|
||||
|
||||
# To handle restarts where we wrap around
|
||||
if from_token > self.pos:
|
||||
from_token = -1
|
||||
|
||||
# list of tuple(int, BaseFederationRow), where the first is the position
|
||||
# of the federation stream.
|
||||
rows = [] # type: List[Tuple[int, BaseFederationRow]]
|
||||
|
||||
# Fetch presence to send to destinations
|
||||
i = self.presence_destinations.bisect_right(from_token)
|
||||
j = self.presence_destinations.bisect_right(to_token) + 1
|
||||
|
||||
for pos, (user_id, dests) in self.presence_destinations.items()[i:j]:
|
||||
rows.append(
|
||||
(
|
||||
pos,
|
||||
PresenceDestinationsRow(
|
||||
state=self.presence_map[user_id], destinations=list(dests)
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
# Fetch changes keyed edus
|
||||
i = self.keyed_edu_changed.bisect_right(from_token)
|
||||
j = self.keyed_edu_changed.bisect_right(to_token) + 1
|
||||
# We purposefully clobber based on the key here, python dict comprehensions
|
||||
# always use the last value, so this will correctly point to the last
|
||||
# stream position.
|
||||
keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}
|
||||
|
||||
for ((destination, edu_key), pos) in keyed_edus.items():
|
||||
rows.append(
|
||||
(
|
||||
pos,
|
||||
KeyedEduRow(
|
||||
key=edu_key, edu=self.keyed_edu[(destination, edu_key)]
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
# Fetch changed edus
|
||||
i = self.edus.bisect_right(from_token)
|
||||
j = self.edus.bisect_right(to_token) + 1
|
||||
edus = self.edus.items()[i:j]
|
||||
|
||||
for (pos, edu) in edus:
|
||||
rows.append((pos, EduRow(edu)))
|
||||
|
||||
# Sort rows based on pos
|
||||
rows.sort()
|
||||
|
||||
return (
|
||||
[(pos, (row.TypeId, row.to_data())) for pos, row in rows],
|
||||
to_token,
|
||||
False,
|
||||
)
|
||||
|
||||
|
||||
class BaseFederationRow:
|
||||
"""Base class for rows to be sent in the federation stream.
|
||||
|
||||
Specifies how to identify, serialize and deserialize the different types.
|
||||
"""
|
||||
|
||||
TypeId = "" # Unique string that ids the type. Must be overridden in sub classes.
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
"""Parse the data from the federation stream into a row.
|
||||
|
||||
Args:
|
||||
data: The value of ``data`` from FederationStreamRow.data, type
|
||||
depends on the type of stream
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def to_data(self):
|
||||
"""Serialize this row to be sent over the federation stream.
|
||||
|
||||
Returns:
|
||||
The value to be sent in FederationStreamRow.data. The type depends
|
||||
on the type of stream.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
"""Add this row to the appropriate field in the buffer ready for this
|
||||
to be sent over federation.
|
||||
|
||||
We use a buffer so that we can batch up events that have come in at
|
||||
the same time and send them all at once.
|
||||
|
||||
Args:
|
||||
buff (BufferedToSend)
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class PresenceDestinationsRow(
|
||||
BaseFederationRow,
|
||||
namedtuple(
|
||||
"PresenceDestinationsRow",
|
||||
("state", "destinations"), # UserPresenceState # list[str]
|
||||
),
|
||||
):
|
||||
TypeId = "pd"
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
return PresenceDestinationsRow(
|
||||
state=UserPresenceState.from_dict(data["state"]), destinations=data["dests"]
|
||||
)
|
||||
|
||||
def to_data(self):
|
||||
return {"state": self.state.as_dict(), "dests": self.destinations}
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
buff.presence_destinations.append((self.state, self.destinations))
|
||||
|
||||
|
||||
class KeyedEduRow(
|
||||
BaseFederationRow,
|
||||
namedtuple(
|
||||
"KeyedEduRow",
|
||||
("key", "edu"), # tuple(str) - the edu key passed to send_edu # Edu
|
||||
),
|
||||
):
|
||||
"""Streams EDUs that have an associated key that is ued to clobber. For example,
|
||||
typing EDUs clobber based on room_id.
|
||||
"""
|
||||
|
||||
TypeId = "k"
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
return KeyedEduRow(key=tuple(data["key"]), edu=Edu(**data["edu"]))
|
||||
|
||||
def to_data(self):
|
||||
return {"key": self.key, "edu": self.edu.get_internal_dict()}
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
buff.keyed_edus.setdefault(self.edu.destination, {})[self.key] = self.edu
|
||||
|
||||
|
||||
class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))): # Edu
|
||||
"""Streams EDUs that don't have keys. See KeyedEduRow"""
|
||||
|
||||
TypeId = "e"
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
return EduRow(Edu(**data))
|
||||
|
||||
def to_data(self):
|
||||
return self.edu.get_internal_dict()
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
buff.edus.setdefault(self.edu.destination, []).append(self.edu)
|
||||
|
||||
|
||||
_rowtypes = (
|
||||
PresenceDestinationsRow,
|
||||
KeyedEduRow,
|
||||
EduRow,
|
||||
) # type: Tuple[Type[BaseFederationRow], ...]
|
||||
|
||||
TypeToRow = {Row.TypeId: Row for Row in _rowtypes}
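A round-trip sketch of the row serialization (illustrative values): each row travels over the stream as a `(TypeId, data)` pair, and `TypeToRow` recovers the right parser on the other side.

    edu = Edu(
        origin="a.example",
        destination="b.example",
        edu_type="m.typing",
        content={},
    )
    type_id, data = EduRow(edu).TypeId, EduRow(edu).to_data()
    parsed = TypeToRow[type_id].from_data(data)
    assert parsed.edu.edu_type == "m.typing"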
|
||||
|
||||
|
||||
ParsedFederationStreamData = namedtuple(
|
||||
"ParsedFederationStreamData",
|
||||
(
|
||||
"presence_destinations", # list of tuples of UserPresenceState and destinations
|
||||
"keyed_edus", # dict of destination -> { key -> Edu }
|
||||
"edus", # dict of destination -> [Edu]
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def process_rows_for_federation(
|
||||
transaction_queue: FederationSender,
|
||||
rows: List[FederationStream.FederationStreamRow],
|
||||
) -> None:
|
||||
"""Parse a list of rows from the federation stream and put them in the
|
||||
transaction queue ready for sending to the relevant homeservers.
|
||||
|
||||
Args:
|
||||
transaction_queue
|
||||
rows
|
||||
"""
|
||||
|
||||
# The federation stream contains a bunch of different types of
|
||||
# rows that need to be handled differently. We parse the rows, put
|
||||
# them into the appropriate collection and then send them off.
|
||||
|
||||
buff = ParsedFederationStreamData(
|
||||
presence_destinations=[],
|
||||
keyed_edus={},
|
||||
edus={},
|
||||
)
|
||||
|
||||
# Parse the rows in the stream and add to the buffer
|
||||
for row in rows:
|
||||
if row.type not in TypeToRow:
|
||||
logger.error("Unrecognized federation row type %r", row.type)
|
||||
continue
|
||||
|
||||
RowType = TypeToRow[row.type]
|
||||
parsed_row = RowType.from_data(row.data)
|
||||
parsed_row.add_to_buffer(buff)
|
||||
|
||||
for state, destinations in buff.presence_destinations:
|
||||
transaction_queue.send_presence_to_destinations(
|
||||
states=[state], destinations=destinations
|
||||
)
|
||||
|
||||
for edu_map in buff.keyed_edus.values():
|
||||
for key, edu in edu_map.items():
|
||||
transaction_queue.send_edu(edu, key)
|
||||
|
||||
for edu_list in buff.edus.values():
|
||||
for edu in edu_list:
|
||||
transaction_queue.send_edu(edu, None)
|
||||
@@ -18,12 +18,15 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set,
|
||||
|
||||
from prometheus_client import Counter
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
import synapse.metrics
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
|
||||
from synapse.federation.sender.transaction_manager import TransactionManager
|
||||
from synapse.federation.units import Edu
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.metrics import (
|
||||
LaterGauge,
|
||||
event_processing_loop_counter,
|
||||
@@ -31,7 +34,7 @@ from synapse.metrics import (
|
||||
events_processed_counter,
|
||||
)
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.types import Collection, JsonDict, ReadReceipt, RoomStreamToken
|
||||
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -123,6 +126,10 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
|
||||
def get_current_token(self) -> int:
|
||||
raise NotImplementedError()
|
||||
|
||||
@abc.abstractmethod
|
||||
def federation_ack(self, instance_name: str, token: int) -> None:
|
||||
raise NotImplementedError()
|
||||
|
||||
@abc.abstractmethod
|
||||
async def get_replication_rows(
|
||||
self, instance_name: str, from_token: int, to_token: int, target_row_count: int
|
||||
@@ -248,27 +255,15 @@ class FederationSender(AbstractFederationSender):
|
||||
if not events and next_token >= self._last_poked_id:
|
||||
break
|
||||
|
||||
async def get_destinations_for_event(
|
||||
event: EventBase,
|
||||
) -> Collection[str]:
|
||||
"""Computes the destinations to which this event must be sent.
|
||||
|
||||
This returns an empty tuple when there are no destinations to send to,
|
||||
or if this event is not from this homeserver and this server is not
sending it on behalf of another server.
|
||||
|
||||
Will also filter out destinations which this sender is not responsible for,
|
||||
if multiple federation senders exist.
|
||||
"""
|
||||
|
||||
async def handle_event(event: EventBase) -> None:
|
||||
# Only send events for this server.
|
||||
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
|
||||
is_mine = self.is_mine_id(event.sender)
|
||||
if not is_mine and send_on_behalf_of is None:
|
||||
return ()
|
||||
return
|
||||
|
||||
if not event.internal_metadata.should_proactively_send():
|
||||
return ()
|
||||
return
|
||||
|
||||
destinations = None # type: Optional[Set[str]]
|
||||
if not event.prev_event_ids():
|
||||
@@ -303,7 +298,7 @@ class FederationSender(AbstractFederationSender):
|
||||
"Failed to calculate hosts in room for event: %s",
|
||||
event.event_id,
|
||||
)
|
||||
return ()
|
||||
return
|
||||
|
||||
destinations = {
|
||||
d
|
||||
@@ -313,15 +308,17 @@ class FederationSender(AbstractFederationSender):
|
||||
)
|
||||
}
|
||||
|
||||
destinations.discard(self.server_name)
|
||||
|
||||
if send_on_behalf_of is not None:
|
||||
# If we are sending the event on behalf of another server
|
||||
# then it already has the event and there is no reason to
|
||||
# send the event to it.
|
||||
destinations.discard(send_on_behalf_of)
|
||||
|
||||
logger.debug("Sending %s to %r", event, destinations)
|
||||
|
||||
if destinations:
|
||||
await self._send_pdu(event, destinations)
|
||||
|
||||
now = self.clock.time_msec()
|
||||
ts = await self.store.get_received_ts(event.event_id)
|
||||
|
||||
@@ -329,29 +326,24 @@ class FederationSender(AbstractFederationSender):
|
||||
"federation_sender"
|
||||
).observe((now - ts) / 1000)
|
||||
|
||||
return destinations
|
||||
return ()
|
||||
async def handle_room_events(events: Iterable[EventBase]) -> None:
|
||||
with Measure(self.clock, "handle_room_events"):
|
||||
for event in events:
|
||||
await handle_event(event)
|
||||
|
||||
async def get_federatable_events_and_destinations(
|
||||
events: Iterable[EventBase],
|
||||
) -> List[Tuple[EventBase, Collection[str]]]:
|
||||
with Measure(self.clock, "get_destinations_for_events"):
|
||||
# Fetch federation destinations per event,
|
||||
# skip if get_destinations_for_event returns an empty collection,
|
||||
# return list of event->destinations pairs.
|
||||
return [
|
||||
(event, dests)
|
||||
for (event, dests) in [
|
||||
(event, await get_destinations_for_event(event))
|
||||
for event in events
|
||||
]
|
||||
if dests
|
||||
]
|
||||
events_by_room = {} # type: Dict[str, List[EventBase]]
|
||||
for event in events:
|
||||
events_by_room.setdefault(event.room_id, []).append(event)
|
||||
|
||||
events_and_dests = await get_federatable_events_and_destinations(events)
|
||||
|
||||
# Send corresponding events to each destination queue
|
||||
await self._distribute_events(events_and_dests)
|
||||
await make_deferred_yieldable(
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(handle_room_events, evs)
|
||||
for evs in events_by_room.values()
|
||||
],
|
||||
consumeErrors=True,
|
||||
)
|
||||
)
|
||||
|
||||
await self.store.update_federation_out_pos("events", next_token)
|
||||
|
||||
@@ -369,7 +361,7 @@ class FederationSender(AbstractFederationSender):
|
||||
events_processed_counter.inc(len(events))
|
||||
|
||||
event_processing_loop_room_count.labels("federation_sender").inc(
|
||||
len({event.room_id for event in events})
|
||||
len(events_by_room)
|
||||
)
|
||||
|
||||
event_processing_loop_counter.labels("federation_sender").inc()
|
||||
@@ -381,53 +373,34 @@ class FederationSender(AbstractFederationSender):
|
||||
finally:
|
||||
self._is_processing = False
|
||||
|
||||
async def _distribute_events(
|
||||
self,
|
||||
events_and_dests: Iterable[Tuple[EventBase, Collection[str]]],
|
||||
) -> None:
|
||||
"""Distribute events to the respective per_destination queues.
|
||||
async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
|
||||
# We loop through all destinations to see whether we already have
|
||||
# a transaction in progress. If we do, stick it in the pending_pdus
|
||||
# table and we'll get back to it later.
|
||||
|
||||
Also persists last-seen per-room stream_ordering to 'destination_rooms'.
|
||||
destinations = set(destinations)
|
||||
destinations.discard(self.server_name)
|
||||
logger.debug("Sending to: %s", str(destinations))
|
||||
|
||||
Args:
|
||||
events_and_dests: A list of tuples, which are (event: EventBase, destinations: Collection[str]).
|
||||
Every event is paired with its intended destinations (in federation).
|
||||
"""
|
||||
# Tuples of room_id + destination to their max-seen stream_ordering
|
||||
room_with_dest_stream_ordering = {} # type: Dict[Tuple[str, str], int]
|
||||
if not destinations:
|
||||
return
|
||||
|
||||
# List of events to send to each destination
|
||||
events_by_dest = {} # type: Dict[str, List[EventBase]]
|
||||
sent_pdus_destination_dist_total.inc(len(destinations))
|
||||
sent_pdus_destination_dist_count.inc()
|
||||
|
||||
# For each event-destinations pair...
|
||||
for event, destinations in events_and_dests:
|
||||
        assert pdu.internal_metadata.stream_ordering

        # (we got this from the database, it's filled)
        assert event.internal_metadata.stream_ordering

        sent_pdus_destination_dist_total.inc(len(destinations))
        sent_pdus_destination_dist_count.inc()

        # ...iterate over those destinations..
        for destination in destinations:
            # ...update their stream-ordering...
            room_with_dest_stream_ordering[(event.room_id, destination)] = max(
                event.internal_metadata.stream_ordering,
                room_with_dest_stream_ordering.get((event.room_id, destination), 0),
            )

            # ...and add the event to each destination queue.
            events_by_dest.setdefault(destination, []).append(event)

        # Bulk-store destination_rooms stream_ids
        await self.store.bulk_store_destination_rooms_entries(
            room_with_dest_stream_ordering
        # track the fact that we have a PDU for these destinations,
        # to allow us to perform catch-up later on if the remote is unreachable
        # for a while.
        await self.store.store_destination_rooms_entries(
            destinations,
            pdu.room_id,
            pdu.internal_metadata.stream_ordering,
        )

        for destination, pdus in events_by_dest.items():
            logger.debug("Sending %d pdus to %s", len(pdus), destination)

            self._get_per_destination_queue(destination).send_pdus(pdus)
        for destination in destinations:
            self._get_per_destination_queue(destination).send_pdu(pdu)

    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """Send a RR to any other servers in the room

@@ -535,6 +508,10 @@ class FederationSender(AbstractFederationSender):
            # No-op if presence is disabled.
            return

        # Ensure we only send out presence states for local users.
        for state in states:
            assert self.is_mine_id(state.user_id)

        for destination in destinations:
            if destination == self.server_name:
                continue

@@ -631,6 +608,10 @@ class FederationSender(AbstractFederationSender):
        # to a worker.
        return 0

    def federation_ack(self, instance_name: str, token: int) -> None:
        # It is not expected that this gets called on FederationSender.
        raise NotImplementedError()

    @staticmethod
    async def get_replication_rows(
        instance_name: str, from_token: int, to_token: int, target_row_count: int

@@ -28,6 +28,7 @@ from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process

@@ -154,22 +155,19 @@ class PerDestinationQueue:
            + len(self._pending_edus_keyed)
        )

    def send_pdus(self, pdus: Iterable[EventBase]) -> None:
        """Add PDUs to the queue, and start the transmission loop if necessary
    def send_pdu(self, pdu: EventBase) -> None:
        """Add a PDU to the queue, and start the transmission loop if necessary

        Args:
            pdus: pdus to send
            pdu: pdu to send
        """
        if not self._catching_up or self._last_successful_stream_ordering is None:
            # only enqueue the PDU if we are not catching up (False) or do not
            # yet know if we have anything to catch up (None)
            self._pending_pdus.extend(pdus)
            self._pending_pdus.append(pdu)
        else:
            self._catchup_last_skipped = max(
                pdu.internal_metadata.stream_ordering
                for pdu in pdus
                if pdu.internal_metadata.stream_ordering is not None
            )
            assert pdu.internal_metadata.stream_ordering
            self._catchup_last_skipped = pdu.internal_metadata.stream_ordering

        self.attempt_new_transaction()
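The gating above is the interesting part of this hunk: while the queue is in catch-up mode, incoming PDUs are deliberately *not* enqueued; the queue only remembers the newest stream ordering it skipped so the catch-up loop knows how far it must advance. A minimal sketch of that behaviour, using bare stream orderings instead of EventBase objects (simplified types, not Synapse's actual class):

import typing

class CatchupGateSketch:
    """Sketch of PerDestinationQueue's enqueue-or-skip gating."""

    def __init__(self) -> None:
        self._pending: typing.List[int] = []
        self._catching_up = False
        self._last_successful_stream_ordering: typing.Optional[int] = None
        self._catchup_last_skipped = 0

    def send_pdus(self, orderings: typing.Iterable[int]) -> None:
        if not self._catching_up or self._last_successful_stream_ordering is None:
            # Not catching up (or we don't yet know): queue for sending now.
            self._pending.extend(orderings)
        else:
            # Catching up: don't queue; just record the newest thing we
            # skipped, so catch-up can fetch everything up to that point.
            self._catchup_last_skipped = max(
                [self._catchup_last_skipped, *orderings]
            )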
@@ -577,6 +575,14 @@ class PerDestinationQueue:
            for content in contents
        ]

        if edus:
            issue9533_logger.debug(
                "Sending %i to-device messages to %s, up to stream id %i",
                len(edus),
                self._destination,
                stream_id,
            )

        return (edus, stream_id)

    def _start_catching_up(self) -> None:

@@ -17,13 +17,19 @@ import logging
import urllib
from typing import Any, Dict, List, Optional

import attr
import ijson

from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
    FEDERATION_UNSTABLE_PREFIX,
    FEDERATION_V1_PREFIX,
    FEDERATION_V2_PREFIX,
)
from synapse.events import EventBase, make_event_from_dict
from synapse.http.matrixfederationclient import ByteParser
from synapse.logging.utils import log_function
from synapse.types import JsonDict
@@ -240,21 +246,36 @@ class TransportLayerClient:
        return content

    @log_function
    async def send_join_v1(self, destination, room_id, event_id, content):
    async def send_join_v1(
        self,
        room_version,
        destination,
        room_id,
        event_id,
        content,
    ) -> "SendJoinResponse":
        path = _create_v1_path("/send_join/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=content
            destination=destination,
            path=path,
            data=content,
            parser=SendJoinParser(room_version, v1_api=True),
        )

        return response

    @log_function
    async def send_join_v2(self, destination, room_id, event_id, content):
    async def send_join_v2(
        self, room_version, destination, room_id, event_id, content
    ) -> "SendJoinResponse":
        path = _create_v2_path("/send_join/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=content
            destination=destination,
            path=path,
            data=content,
            parser=SendJoinParser(room_version, v1_api=False),
        )

        return response

@@ -995,6 +1016,7 @@ class TransportLayerClient:
            returned per space
            exclude_rooms: a list of any rooms we can skip
        """
        # TODO When switching to the stable endpoint, use GET instead of POST.
        path = _create_path(
            FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id
        )

@@ -1052,3 +1074,59 @@ def _create_v2_path(path, *args):
        str
    """
    return _create_path(FEDERATION_V2_PREFIX, path, *args)


@attr.s(slots=True, auto_attribs=True)
class SendJoinResponse:
    """The parsed response of a `/send_join` request."""

    auth_events: List[EventBase]
    state: List[EventBase]


@ijson.coroutine
def _event_list_parser(room_version: RoomVersion, events: List[EventBase]):
    """Helper function for use with `ijson.items_coro` to parse an array of
    events and add them to the given list.
    """

    while True:
        obj = yield
        event = make_event_from_dict(obj, room_version)
        events.append(event)


class SendJoinParser(ByteParser[SendJoinResponse]):
    """A parser for the response to `/send_join` requests.

    Args:
        room_version: The version of the room.
        v1_api: Whether the response is in the v1 format.
    """

    CONTENT_TYPE = "application/json"

    def __init__(self, room_version: RoomVersion, v1_api: bool):
        self._response = SendJoinResponse([], [])

        # The V1 API has the shape of `[200, {...}]`, which we handle by
        # prefixing with `item.*`.
        prefix = "item." if v1_api else ""

        self._coro_state = ijson.items_coro(
            _event_list_parser(room_version, self._response.state),
            prefix + "state.item",
        )
        self._coro_auth = ijson.items_coro(
            _event_list_parser(room_version, self._response.auth_events),
            prefix + "auth_chain.item",
        )

    def write(self, data: bytes) -> int:
        self._coro_state.send(data)
        self._coro_auth.send(data)

        return len(data)

    def finish(self) -> SendJoinResponse:
        return self._response
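The streaming-parser pattern above is worth seeing in isolation: ijson.items_coro pushes each completed array element to a primed coroutine as the bytes arrive, so the whole /send_join response never has to be held in memory as one JSON document. A self-contained sketch of the same pattern, using plain dicts instead of EventBase and a made-up payload:

import ijson

@ijson.coroutine
def _collect(items: list):
    # Receives one fully-parsed array element at a time.
    while True:
        items.append((yield))

state: list = []
auth_chain: list = []

# Same prefixes as SendJoinParser uses for the (v2) response shape.
coro_state = ijson.items_coro(_collect(state), "state.item")
coro_auth = ijson.items_coro(_collect(auth_chain), "auth_chain.item")

payload = b'{"state": [{"type": "m.room.create"}], "auth_chain": [{"type": "m.room.member"}]}'

# Feed the body in small chunks, as a streaming HTTP response would arrive.
for i in range(0, len(payload), 7):
    chunk = payload[i : i + 7]
    coro_state.send(chunk)
    coro_auth.send(chunk)

assert state == [{"type": "m.room.create"}]
assert auth_chain == [{"type": "m.room.member"}]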
@@ -160,7 +160,7 @@ class Authenticator:
            # If we get a valid signed request from the other side, it's probably
            # alive
            retry_timings = await self.store.get_destination_retry_timings(origin)
            if retry_timings and retry_timings["retry_last_ts"]:
            if retry_timings and retry_timings.retry_last_ts:
                run_in_background(self._reset_retry_timings, origin)

        return origin

@@ -1376,6 +1376,32 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
    PATH = "/spaces/(?P<room_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: JsonDict,
        query: Mapping[bytes, Sequence[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
        max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")

        exclude_rooms = []
        if b"exclude_rooms" in query:
            try:
                exclude_rooms = [
                    room_id.decode("ascii") for room_id in query[b"exclude_rooms"]
                ]
            except Exception:
                raise SynapseError(
                    400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM
                )

        return 200, await self.handler.federation_space_summary(
            origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )

    # TODO When switching to the stable endpoint, remove the POST handler.
    async def on_POST(
        self,
        origin: str,

@@ -1402,7 +1428,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
        )

        return 200, await self.handler.federation_space_summary(
            room_id, suggested_only, max_rooms_per_space, exclude_rooms
            origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )
@@ -15,12 +15,9 @@
import email.mime.multipart
import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING, List, Optional
from typing import TYPE_CHECKING, List, Optional, Tuple

from synapse.api.errors import StoreError, SynapseError
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils

@@ -36,33 +33,38 @@ class AccountValidityHandler:
        self.hs = hs
        self.config = hs.config
        self.store = self.hs.get_datastore()
        self.sendmail = self.hs.get_sendmail()
        self.send_email_handler = self.hs.get_send_email_handler()
        self.clock = self.hs.get_clock()

        self._account_validity = self.hs.config.account_validity
        self._app_name = self.hs.config.email_app_name

        self._account_validity_enabled = (
            hs.config.account_validity.account_validity_enabled
        )
        self._account_validity_renew_by_email_enabled = (
            hs.config.account_validity.account_validity_renew_by_email_enabled
        )

        self._account_validity_period = None
        if self._account_validity_enabled:
            self._account_validity_period = (
                hs.config.account_validity.account_validity_period
            )

        if (
            self._account_validity.enabled
            and self._account_validity.renew_by_email_enabled
            self._account_validity_enabled
            and self._account_validity_renew_by_email_enabled
        ):
            # Don't do email-specific configuration if renewal by email is disabled.
            self._template_html = self.config.account_validity_template_html
            self._template_text = self.config.account_validity_template_text

            try:
                app_name = self.hs.config.email_app_name

                self._subject = self._account_validity.renew_email_subject % {
                    "app": app_name
                }

                self._from_string = self.hs.config.email_notif_from % {"app": app_name}
            except Exception:
                # If substitution failed, fall back to the bare strings.
                self._subject = self._account_validity.renew_email_subject
                self._from_string = self.hs.config.email_notif_from

            self._raw_from = email.utils.parseaddr(self._from_string)[1]
            self._template_html = (
                hs.config.account_validity.account_validity_template_html
            )
            self._template_text = (
                hs.config.account_validity.account_validity_template_text
            )
            self._renew_email_subject = (
                hs.config.account_validity.account_validity_renew_email_subject
            )

        # Check the renewal emails to send and send them every 30min.
        if hs.config.run_background_tasks:

@@ -143,38 +145,17 @@ class AccountValidityHandler:
        }

        html_text = self._template_html.render(**template_vars)
        html_part = MIMEText(html_text, "html", "utf8")

        plain_text = self._template_text.render(**template_vars)
        text_part = MIMEText(plain_text, "plain", "utf8")

        for address in addresses:
            raw_to = email.utils.parseaddr(address)[1]

            multipart_msg = MIMEMultipart("alternative")
            multipart_msg["Subject"] = self._subject
            multipart_msg["From"] = self._from_string
            multipart_msg["To"] = address
            multipart_msg["Date"] = email.utils.formatdate()
            multipart_msg["Message-ID"] = email.utils.make_msgid()
            multipart_msg.attach(text_part)
            multipart_msg.attach(html_part)

            logger.info("Sending renewal email to %s", address)

            await make_deferred_yieldable(
                self.sendmail(
                    self.hs.config.email_smtp_host,
                    self._raw_from,
                    raw_to,
                    multipart_msg.as_string().encode("utf8"),
                    reactor=self.hs.get_reactor(),
                    port=self.hs.config.email_smtp_port,
                    requireAuthentication=self.hs.config.email_smtp_user is not None,
                    username=self.hs.config.email_smtp_user,
                    password=self.hs.config.email_smtp_pass,
                    requireTransportSecurity=self.hs.config.require_transport_security,
                )
            await self.send_email_handler.send_email(
                email_address=raw_to,
                subject=self._renew_email_subject,
                app_name=self._app_name,
                html=html_text,
                text=plain_text,
            )

        await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True)
@@ -220,50 +201,87 @@ class AccountValidityHandler:
            attempts += 1
        raise StoreError(500, "Couldn't generate a unique string as refresh string.")

    async def renew_account(self, renewal_token: str) -> bool:
    async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]:
        """Renews the account attached to a given renewal token by pushing back the
        expiration date by the current validity period in the server's configuration.

        If it turns out that the token is valid but has already been used, then the
        token is considered stale. A token is stale if the 'token_used_ts_ms' db column
        is non-null.

        Args:
            renewal_token: Token sent with the renewal request.
        Returns:
            Whether the provided token is valid.
            A tuple containing:
              * A bool representing whether the token is valid and unused.
              * A bool which is `True` if the token is valid, but stale.
              * An int representing the user's expiry timestamp as milliseconds since the
                epoch, or 0 if the token was invalid.
        """
        try:
            user_id = await self.store.get_user_from_renewal_token(renewal_token)
            (
                user_id,
                current_expiration_ts,
                token_used_ts,
            ) = await self.store.get_user_from_renewal_token(renewal_token)
        except StoreError:
            return False
            return False, False, 0

        # Check whether this token has already been used.
        if token_used_ts:
            logger.info(
                "User '%s' attempted to use previously used token '%s' to renew account",
                user_id,
                renewal_token,
            )
            return False, True, current_expiration_ts

        logger.debug("Renewing an account for user %s", user_id)
        await self.renew_account_for_user(user_id)

        return True
        # Renew the account. Pass the renewal_token here so that it is not cleared.
        # We want to keep the token around in case the user attempts to renew their
        # account with the same token twice (clicking the email link twice).
        #
        # In that case, the token will be accepted, but the account's expiration ts
        # will remain unchanged.
        new_expiration_ts = await self.renew_account_for_user(
            user_id, renewal_token=renewal_token
        )

        return True, False, new_expiration_ts

    async def renew_account_for_user(
        self,
        user_id: str,
        expiration_ts: Optional[int] = None,
        email_sent: bool = False,
        renewal_token: Optional[str] = None,
    ) -> int:
        """Renews the account attached to a given user by pushing back the
        expiration date by the current validity period in the server's
        configuration.

        Args:
            renewal_token: Token sent with the renewal request.
            user_id: The ID of the user to renew.
            expiration_ts: New expiration date. Defaults to now + validity period.
            email_sen: Whether an email has been sent for this validity period.
                Defaults to False.
            email_sent: Whether an email has been sent for this validity period.
            renewal_token: Token sent with the renewal request. The user's token
                will be cleared if this is None.

        Returns:
            New expiration date for this account, as a timestamp in
            milliseconds since epoch.
        """
        now = self.clock.time_msec()
        if expiration_ts is None:
            expiration_ts = self.clock.time_msec() + self._account_validity.period
            expiration_ts = now + self._account_validity_period

        await self.store.set_account_validity_for_user(
            user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
            user_id=user_id,
            expiration_ts=expiration_ts,
            email_sent=email_sent,
            renewal_token=renewal_token,
            token_used_ts=now,
        )

        return expiration_ts
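The three-element return value replaces a bare bool, so callers must now distinguish "valid and renewed", "valid but stale" (the email link was clicked twice), and "invalid". A hedged sketch of a caller, with a hypothetical render_response helper standing in for whatever the servlet actually does with the result:

async def handle_renewal_request(handler, renewal_token: str):
    # handler is an AccountValidityHandler; render_response is hypothetical.
    token_valid, token_stale, expiration_ts = await handler.renew_account(
        renewal_token
    )

    if token_valid or token_stale:
        # Stale just means the link was used before; the account is still
        # renewed until expiration_ts, so show a success page either way.
        return render_response(200, expiration_ts=expiration_ts)

    # Invalid token: expiration_ts is 0 here and carries no information.
    return render_response(404)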
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Union

from prometheus_client import Counter

@@ -33,7 +33,7 @@ from synapse.metrics.background_process_metrics import (
    wrap_as_background_process,
)
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import Collection, JsonDict, RoomAlias, RoomStreamToken, UserID
from synapse.types import JsonDict, RoomAlias, RoomStreamToken, UserID
from synapse.util.metrics import Measure

if TYPE_CHECKING:

@@ -17,6 +17,7 @@ import logging
import time
import unicodedata
import urllib.parse
from binascii import crc32
from typing import (
    TYPE_CHECKING,
    Any,

@@ -34,6 +35,7 @@ from typing import (
import attr
import bcrypt
import pymacaroons
import unpaddedbase64

from twisted.web.server import Request

@@ -66,6 +68,7 @@ from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email

if TYPE_CHECKING:

@@ -808,10 +811,12 @@ class AuthHandler(BaseHandler):
            logger.info(
                "Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry
            )
            target_user_id_obj = UserID.from_string(puppets_user_id)
        else:
            logger.info(
                "Logging in user %s on device %s%s", user_id, device_id, fmt_expiry
            )
            target_user_id_obj = UserID.from_string(user_id)

        if (
            not is_appservice_ghost

@@ -819,7 +824,7 @@ class AuthHandler(BaseHandler):
        ):
            await self.auth.check_auth_blocking(user_id)

        access_token = self.macaroon_gen.generate_access_token(user_id)
        access_token = self.generate_access_token(target_user_id_obj)
        await self.store.add_access_token_to_user(
            user_id=user_id,
            token=access_token,

@@ -1192,6 +1197,19 @@ class AuthHandler(BaseHandler):
            return None
        return user_id

    def generate_access_token(self, for_user: UserID) -> str:
        """Generates an opaque string, for use as an access token"""

        # we use the following format for access tokens:
        #    syt_<base64 local part>_<random string>_<base62 crc check>

        b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
        random_string = stringutils.random_string(20)
        base = f"syt_{b64local}_{random_string}"

        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        return f"{base}_{crc}"
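The CRC suffix lets a server cheaply reject mistyped or truncated tokens before hitting the database. A standalone sketch of checking that suffix; note the base62 alphabet below is an assumption for illustration (Synapse's own base62_encode may order its digits differently), but the sketch is self-consistent because it encodes and verifies with the same alphabet:

import string
from binascii import crc32

# Assumed digit order; Synapse's base62_encode may use a different alphabet.
_BASE62 = string.digits + string.ascii_lowercase + string.ascii_uppercase

def base62_encode(num: int, minwidth: int = 1) -> str:
    digits = ""
    while num:
        num, remainder = divmod(num, 62)
        digits = _BASE62[remainder] + digits
    return digits.rjust(minwidth, "0")

def has_valid_checksum(token: str) -> bool:
    """Recompute the trailing CRC over everything before the last underscore."""
    base, sep, crc = token.rpartition("_")
    return bool(sep) and crc == base62_encode(crc32(base.encode("ascii")), minwidth=6)

# Round-trip check with a token built the same way as generate_access_token:
base = "syt_dGVzdA_abcdefghij0123456789"
token = f"{base}_{base62_encode(crc32(base.encode('ascii')), minwidth=6)}"
assert has_valid_checksum(token)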
    async def validate_short_term_login_token(
        self, login_token: str
    ) -> LoginTokenAttributes:

@@ -1248,7 +1266,7 @@ class AuthHandler(BaseHandler):

        # see if any of our auth providers want to know about this
        for provider in self.password_providers:
            for token, token_id, device_id in tokens_and_devices:
            for token, _, device_id in tokens_and_devices:
                await provider.on_logged_out(
                    user_id=user_id, device_id=device_id, access_token=token
                )

@@ -1585,10 +1603,7 @@ class MacaroonGenerator:

    hs = attr.ib()

    def generate_access_token(
        self, user_id: str, extra_caveats: Optional[List[str]] = None
    ) -> str:
        extra_caveats = extra_caveats or []
    def generate_guest_access_token(self, user_id: str) -> str:
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = access")
        # Include a nonce, to make sure that each login gets a different

@@ -1596,8 +1611,7 @@ class MacaroonGenerator:
        macaroon.add_first_party_caveat(
            "nonce = %s" % (stringutils.random_string_with_symbols(16),)
        )
        for caveat in extra_caveats:
            macaroon.add_first_party_caveat(caveat)
        macaroon.add_first_party_caveat("guest = true")
        return macaroon.serialize()

    def generate_short_term_login_token(

@@ -49,7 +49,9 @@ class DeactivateAccountHandler(BaseHandler):
        if hs.config.run_background_tasks:
            hs.get_reactor().callWhenRunning(self._start_user_parting)

        self._account_validity_enabled = hs.config.account_validity.enabled
        self._account_validity_enabled = (
            hs.config.account_validity.account_validity_enabled
        )

    async def deactivate_account(
        self,
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple

from synapse.api import errors
from synapse.api.constants import EventTypes

@@ -28,7 +28,6 @@ from synapse.api.errors import (
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import (
    Collection,
    JsonDict,
    StreamToken,
    UserID,

@@ -156,8 +155,7 @@ class DeviceWorkerHandler(BaseHandler):
                # The user may have left the room
                # TODO: Check if they actually did or if we were just invited.
                if room_id not in room_ids:
                    for key, event_id in current_state_ids.items():
                        etype, state_key = key
                    for etype, state_key in current_state_ids.keys():
                        if etype != EventTypes.Member:
                            continue
                        possibly_left.add(state_key)

@@ -179,8 +177,7 @@ class DeviceWorkerHandler(BaseHandler):
                    log_kv(
                        {"event": "encountered empty previous state", "room_id": room_id}
                    )
                    for key, event_id in current_state_ids.items():
                        etype, state_key = key
                    for etype, state_key in current_state_ids.keys():
                        if etype != EventTypes.Member:
                            continue
                        possibly_changed.add(state_key)

@@ -198,8 +195,7 @@ class DeviceWorkerHandler(BaseHandler):
                for state_dict in prev_state_ids.values():
                    member_event = state_dict.get((EventTypes.Member, user_id), None)
                    if not member_event or member_event != current_member_id:
                        for key, event_id in current_state_ids.items():
                            etype, state_key = key
                        for etype, state_key in current_state_ids.keys():
                            if etype != EventTypes.Member:
                                continue
                            possibly_changed.add(state_key)

@@ -484,7 +480,7 @@ class DeviceHandler(DeviceWorkerHandler):
            "device_list_key", position, users=[user_id], rooms=room_ids
        )

        if hosts and self.federation_sender:
        if hosts:
            logger.info(
                "Sending device list update notif for %r to: %r", user_id, hosts
            )

@@ -714,7 +710,7 @@ class DeviceListUpdater:
            # This can happen since we batch updates
            return

        for device_id, stream_id, prev_ids, content in pending_updates:
        for device_id, stream_id, prev_ids, _ in pending_updates:
            logger.debug(
                "Handling update %r/%r, ID: %r, prev: %r ",
                user_id,

@@ -740,7 +736,7 @@ class DeviceListUpdater:
        else:
            # Simply update the single device, since we know that is the only
            # change (because of the single prev_id matching the current cache)
            for device_id, stream_id, prev_ids, content in pending_updates:
            for device_id, stream_id, _, content in pending_updates:
                await self.store.update_remote_device_list_cache_entry(
                    user_id, device_id, content, stream_id
                )

@@ -929,6 +925,10 @@ class DeviceListUpdater:
        else:
            cached_devices = await self.store.get_cached_devices_for_user(user_id)
            if cached_devices == {d["device_id"]: d for d in devices}:
                logging.info(
                    "Skipping device list resync for %s, as our cache matches already",
                    user_id,
                )
                devices = []
                ignore_devices = True

@@ -944,6 +944,9 @@ class DeviceListUpdater:
            await self.store.update_remote_device_list_cache(
                user_id, devices, stream_id
            )
        # mark the cache as valid, whether or not we actually processed any device
        # list updates.
        await self.store.mark_remote_user_device_cache_as_valid(user_id)
        device_ids = [device["device_id"] for device in devices]

        # Handle cross-signing keys.

@@ -15,7 +15,7 @@
import logging
from typing import TYPE_CHECKING, Any, Dict

from synapse.api.constants import EduTypes
from synapse.api.constants import ToDeviceEventTypes
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background

@@ -51,7 +51,9 @@ class DeviceMessageHandler:
        # same instance. Other federation sender instances will get notified by
        # `synapse.app.generic_worker.FederationSenderHandler` when it sees it
        # in the to-device replication stream.
        self.federation_sender = hs.get_federation_sender()
        self.federation_sender = None
        if hs.should_send_federation():
            self.federation_sender = hs.get_federation_sender()

        # If we can handle the to device EDUs we do so, otherwise we route them
        # to the appropriate worker.

@@ -77,6 +79,8 @@ class DeviceMessageHandler:
                ReplicationUserDevicesResyncRestServlet.make_client(hs)
            )

        # a rate limiter for room key requests. The keys are
        # (sending_user_id, sending_device_id).
        self._ratelimiter = Ratelimiter(
            store=self.store,
            clock=hs.get_clock(),

@@ -98,12 +102,25 @@ class DeviceMessageHandler:
        for user_id, by_device in content["messages"].items():
            # we use UserID.from_string to catch invalid user ids
            if not self.is_mine(UserID.from_string(user_id)):
                logger.warning("Request for keys for non-local user %s", user_id)
                logger.warning("To-device message to non-local user %s", user_id)
                raise SynapseError(400, "Not a user here")

            if not by_device:
                continue

            # Ratelimit key requests by the sending user.
            if message_type == ToDeviceEventTypes.RoomKeyRequest:
                allowed, _ = await self._ratelimiter.can_do_action(
                    None, (sender_user_id, None)
                )
                if not allowed:
                    logger.info(
                        "Dropping room_key_request from %s to %s due to rate limit",
                        sender_user_id,
                        user_id,
                    )
                    continue

            messages_by_device = {
                device_id: {
                    "content": message_content,

@@ -190,13 +207,19 @@ class DeviceMessageHandler:
        for user_id, by_device in messages.items():
            # Ratelimit local cross-user key requests by the sending device.
            if (
                message_type == EduTypes.RoomKeyRequest
                message_type == ToDeviceEventTypes.RoomKeyRequest
                and user_id != sender_user_id
                and await self._ratelimiter.can_do_action(
            ):
                allowed, _ = await self._ratelimiter.can_do_action(
                    requester, (sender_user_id, requester.device_id)
                )
            ):
                continue
                if not allowed:
                    logger.info(
                        "Dropping room_key_request from %s to %s due to rate limit",
                        sender_user_id,
                        user_id,
                    )
                    continue

            # we use UserID.from_string to catch invalid user ids
            if self.is_mine(UserID.from_string(user_id)):
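The change above rate-limits m.room_key_request to-device messages per sending user (and, for local sends, per sending device). The Ratelimiter used is Synapse's own; purely as an illustration of the keyed-limiter idea (not Synapse's API), a tiny sliding-window version might look like:

import time
from typing import Dict, List, Optional, Tuple

class KeyedRatelimiterSketch:
    """Allow at most `burst` actions per `period` seconds for each key,
    where a key is e.g. (sending_user_id, sending_device_id)."""

    def __init__(self, burst: int, period: float) -> None:
        self._burst = burst
        self._period = period
        self._history: Dict[Tuple[Optional[str], Optional[str]], List[float]] = {}

    def can_do_action(self, key: Tuple[Optional[str], Optional[str]]) -> bool:
        now = time.monotonic()
        # Drop timestamps that have fallen out of the window.
        recent = [t for t in self._history.get(key, []) if now - t < self._period]
        if len(recent) >= self._burst:
            self._history[key] = recent
            return False  # over the limit: caller drops the message
        recent.append(now)
        self._history[key] = recent
        return True

limiter = KeyedRatelimiterSketch(burst=10, period=60.0)
assert limiter.can_do_action(("@alice:example.org", "DEVICE"))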
@@ -14,7 +14,7 @@

import logging
import string
from typing import Iterable, List, Optional
from typing import TYPE_CHECKING, Iterable, List, Optional

from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (

@@ -27,15 +27,19 @@ from synapse.api.errors import (
    SynapseError,
)
from synapse.appservice import ApplicationService
from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id

from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class DirectoryHandler(BaseHandler):
    def __init__(self, hs):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.state = hs.get_state_handler()

@@ -60,7 +64,7 @@ class DirectoryHandler(BaseHandler):
        room_id: str,
        servers: Optional[Iterable[str]] = None,
        creator: Optional[str] = None,
    ):
    ) -> None:
        # general association creation for both human users and app services

        for wchar in string.whitespace:

@@ -74,7 +78,7 @@ class DirectoryHandler(BaseHandler):
        # TODO(erikj): Add transactions.
        # TODO(erikj): Check if there is a current association.
        if not servers:
            users = await self.state.get_current_users_in_room(room_id)
            users = await self.store.get_users_in_room(room_id)
            servers = {get_domain_from_id(u) for u in users}

        if not servers:

@@ -104,8 +108,9 @@ class DirectoryHandler(BaseHandler):
        """

        user_id = requester.user.to_string()
        room_alias_str = room_alias.to_string()

        if len(room_alias.to_string()) > MAX_ALIAS_LENGTH:
        if len(room_alias_str) > MAX_ALIAS_LENGTH:
            raise SynapseError(
                400,
                "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH,

@@ -114,7 +119,7 @@ class DirectoryHandler(BaseHandler):

        service = requester.app_service
        if service:
            if not service.is_interested_in_alias(room_alias.to_string()):
            if not service.is_interested_in_alias(room_alias_str):
                raise SynapseError(
                    400,
                    "This application service has not reserved this kind of alias.",

@@ -138,7 +143,7 @@ class DirectoryHandler(BaseHandler):
                raise AuthError(403, "This user is not permitted to create this alias")

            if not self.config.is_alias_creation_allowed(
                user_id, room_id, room_alias.to_string()
                user_id, room_id, room_alias_str
            ):
                # Lets just return a generic message, as there may be all sorts of
                # reasons why we said no. TODO: Allow configurable error messages

@@ -211,7 +216,7 @@ class DirectoryHandler(BaseHandler):

    async def delete_appservice_association(
        self, service: ApplicationService, room_alias: RoomAlias
    ):
    ) -> None:
        if not service.is_interested_in_alias(room_alias.to_string()):
            raise SynapseError(
                400,

@@ -220,7 +225,7 @@ class DirectoryHandler(BaseHandler):
            )
        await self._delete_association(room_alias)

    async def _delete_association(self, room_alias: RoomAlias):
    async def _delete_association(self, room_alias: RoomAlias) -> str:
        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room alias must be local")

@@ -228,17 +233,19 @@ class DirectoryHandler(BaseHandler):

        return room_id

    async def get_association(self, room_alias: RoomAlias):
    async def get_association(self, room_alias: RoomAlias) -> JsonDict:
        room_id = None
        if self.hs.is_mine(room_alias):
            result = await self.get_association_from_room_alias(room_alias)
            result = await self.get_association_from_room_alias(
                room_alias
            )  # type: Optional[RoomAliasMapping]

            if result:
                room_id = result.room_id
                servers = result.servers
        else:
            try:
                result = await self.federation.make_query(
                fed_result = await self.federation.make_query(
                    destination=room_alias.domain,
                    query_type="directory",
                    args={"room_alias": room_alias.to_string()},

@@ -248,13 +255,13 @@ class DirectoryHandler(BaseHandler):
            except CodeMessageException as e:
                logging.warning("Error retrieving alias")
                if e.code == 404:
                    result = None
                    fed_result = None
                else:
                    raise

            if result and "room_id" in result and "servers" in result:
                room_id = result["room_id"]
                servers = result["servers"]
            if fed_result and "room_id" in fed_result and "servers" in fed_result:
                room_id = fed_result["room_id"]
                servers = fed_result["servers"]

        if not room_id:
            raise SynapseError(

@@ -263,7 +270,7 @@ class DirectoryHandler(BaseHandler):
                Codes.NOT_FOUND,
            )

        users = await self.state.get_current_users_in_room(room_id)
        users = await self.store.get_users_in_room(room_id)
        extra_servers = {get_domain_from_id(u) for u in users}
        servers = set(extra_servers) | set(servers)

@@ -275,7 +282,7 @@ class DirectoryHandler(BaseHandler):

        return {"room_id": room_id, "servers": servers}

    async def on_directory_query(self, args):
    async def on_directory_query(self, args: JsonDict) -> JsonDict:
        room_alias = RoomAlias.from_string(args["room_alias"])
        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room Alias is not hosted on this homeserver")

@@ -293,7 +300,7 @@ class DirectoryHandler(BaseHandler):

    async def _update_canonical_alias(
        self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias
    ):
    ) -> None:
        """
        Send an updated canonical alias event if the removed alias was set as
        the canonical alias or listed in the alt_aliases field.

@@ -344,7 +351,9 @@ class DirectoryHandler(BaseHandler):
            ratelimit=False,
        )

    async def get_association_from_room_alias(self, room_alias: RoomAlias):
    async def get_association_from_room_alias(
        self, room_alias: RoomAlias
    ) -> Optional[RoomAliasMapping]:
        result = await self.store.get_association_from_room_alias(room_alias)
        if not result:
            # Query AS to see if it exists

@@ -372,7 +381,7 @@ class DirectoryHandler(BaseHandler):
        # either no interested services, or no service with an exclusive lock
        return True

    async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str):
    async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool:
        """Determine whether a user can delete an alias.

        One of the following must be true:

@@ -394,14 +403,13 @@ class DirectoryHandler(BaseHandler):
        if not room_id:
            return False

        res = await self.auth.check_can_change_room_list(
        return await self.auth.check_can_change_room_list(
            room_id, UserID.from_string(user_id)
        )
        return res

    async def edit_published_room_list(
        self, requester: Requester, room_id: str, visibility: str
    ):
    ) -> None:
        """Edit the entry of the room in the published room list.

        requester

@@ -469,7 +477,7 @@ class DirectoryHandler(BaseHandler):

    async def edit_published_appservice_room_list(
        self, appservice_id: str, network_id: str, room_id: str, visibility: str
    ):
    ) -> None:
        """Add or remove a room from the appservice/network specific public
        room list.

@@ -499,5 +507,4 @@ class DirectoryHandler(BaseHandler):
            room_id, requester.user.to_string()
        )

        aliases = await self.store.get_aliases_for_room(room_id)
        return aliases
        return await self.store.get_aliases_for_room(room_id)
synapse/handlers/event_auth.py (new file, 167 lines)

@@ -0,0 +1,167 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Collection, Optional

from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.types import StateMap

if TYPE_CHECKING:
    from synapse.server import HomeServer


class EventAuthHandler:
    """
    This class contains methods for authenticating events added to room graphs.
    """

    def __init__(self, hs: "HomeServer"):
        self._store = hs.get_datastore()

    async def check_restricted_join_rules(
        self,
        state_ids: StateMap[str],
        room_version: RoomVersion,
        user_id: str,
        prev_member_event: Optional[EventBase],
    ) -> None:
        """
        Check whether a user can join a room without an invite due to restricted join rules.

        When joining a room with restricted join rules (as defined in MSC3083),
        the membership of spaces must be checked during a room join.

        Args:
            state_ids: The state of the room as it currently is.
            room_version: The room version of the room being joined.
            user_id: The user joining the room.
            prev_member_event: The current membership event for this user.

        Raises:
            AuthError if the user cannot join the room.
        """
        # If the member is invited or currently joined, then nothing to do.
        if prev_member_event and (
            prev_member_event.membership in (Membership.JOIN, Membership.INVITE)
        ):
            return

        # This is not a room with a restricted join rule, so we don't need to do the
        # restricted room specific checks.
        #
        # Note: We'll be applying the standard join rule checks later, which will
        # catch the cases of e.g. trying to join private rooms without an invite.
        if not await self.has_restricted_join_rules(state_ids, room_version):
            return

        # Get the spaces which allow access to this room and check if the user is
        # in any of them.
        allowed_spaces = await self.get_spaces_that_allow_join(state_ids)
        if not await self.is_user_in_rooms(allowed_spaces, user_id):
            raise AuthError(
                403,
                "You do not belong to any of the required spaces to join this room.",
            )

    async def has_restricted_join_rules(
        self, state_ids: StateMap[str], room_version: RoomVersion
    ) -> bool:
        """
        Return if the room has the proper join rules set for access via spaces.

        Args:
            state_ids: The state of the room as it currently is.
            room_version: The room version of the room to query.

        Returns:
            True if the proper room version and join rules are set for restricted access.
        """
        # This only applies to room versions which support the new join rule.
        if not room_version.msc3083_join_rules:
            return False

        # If there's no join rule, then it defaults to invite (so this doesn't apply).
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
        if not join_rules_event_id:
            return False

        # If the join rule is not restricted, this doesn't apply.
        join_rules_event = await self._store.get_event(join_rules_event_id)
        return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED

    async def get_spaces_that_allow_join(
        self, state_ids: StateMap[str]
    ) -> Collection[str]:
        """
        Generate a list of spaces which allow access to a room.

        Args:
            state_ids: The state of the room as it currently is.

        Returns:
            A collection of spaces which provide membership to the room.
        """
        # If there's no join rule, then it defaults to invite (so this doesn't apply).
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
        if not join_rules_event_id:
            return ()

        # If the join rule is not restricted, this doesn't apply.
        join_rules_event = await self._store.get_event(join_rules_event_id)

        # If allowed is of the wrong form, then only allow invited users.
        allowed_spaces = join_rules_event.content.get("allow", [])
        if not isinstance(allowed_spaces, list):
            return ()

        # Pull out the other room IDs, invalid data gets filtered.
        result = []
        for space in allowed_spaces:
            if not isinstance(space, dict):
                continue

            space_id = space.get("space")
            if not isinstance(space_id, str):
                continue

            result.append(space_id)

        return result

    async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool:
        """
        Check whether a user is a member of any of the provided rooms.

        Args:
            room_ids: The rooms to check for membership.
            user_id: The user to check.

        Returns:
            True if the user is in any of the rooms, false otherwise.
        """
        if not room_ids:
            return False

        # Get the list of joined rooms and see if there's an overlap.
        joined_rooms = await self._store.get_rooms_for_user(user_id)

        # Check each room and see if the user is in it.
        for room_id in room_ids:
            if room_id in joined_rooms:
                return True

        # The user was not in any of the rooms.
        return False
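get_spaces_that_allow_join is deliberately defensive: the "allow" key comes from a remote-supplied event, so every level of the structure is type-checked before use. A standalone sketch of the same filtering rules over an example content dict (the "restricted" string value is assumed here; the handler itself compares against JoinRules.MSC3083_RESTRICTED):

from typing import Any, Dict, List

def extract_allowed_spaces(content: Dict[str, Any]) -> List[str]:
    """Mirror the filtering in get_spaces_that_allow_join on a raw content dict."""
    allow = content.get("allow", [])
    if not isinstance(allow, list):
        # Wrong shape: fall back to invite-only behaviour.
        return []
    return [
        entry["space"]
        for entry in allow
        if isinstance(entry, dict) and isinstance(entry.get("space"), str)
    ]

# Example m.room.join_rules content of the MSC3083 shape (join_rule value assumed):
content = {
    "join_rule": "restricted",
    "allow": [
        {"space": "!space:example.org", "via": ["example.org"]},
        "bogus-entry",  # not a dict: filtered out
        {"space": 42},  # space id is not a string: filtered out
    ],
}
assert extract_allowed_spaces(content) == ["!space:example.org"]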
@@ -103,7 +103,7 @@ class EventStreamHandler(BaseHandler):
                # Send down presence.
                if event.state_key == auth_user_id:
                    # Send down presence for everyone in the room.
                    users = await self.state.get_current_users_in_room(
                    users = await self.store.get_users_in_room(
                        event.room_id
                    )  # type: Iterable[str]
                else:

@@ -146,6 +146,7 @@ class FederationHandler(BaseHandler):
        self.is_mine_id = hs.is_mine_id
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        self._event_auth_handler = hs.get_event_auth_handler()
        self._message_handler = hs.get_message_handler()
        self._server_notices_mxid = hs.config.server_notices_mxid
        self.config = hs.config

@@ -551,8 +552,12 @@ class FederationHandler(BaseHandler):
        destination: str,
        room_id: str,
        event_id: str,
    ) -> Tuple[List[EventBase], List[EventBase]]:
        """Requests all of the room state at a given event from a remote homeserver.
    ) -> List[EventBase]:
        """Requests all of the room state at a given event from a remote
        homeserver.

        Will also fetch any missing events reported in the `auth_chain_ids`
        section of `/state_ids`.

        Args:
            destination: The remote homeserver to query for the state.

@@ -560,8 +565,7 @@ class FederationHandler(BaseHandler):
            event_id: The id of the event we want the state at.

        Returns:
            A list of events in the state, not including the event itself, and
            a list of events in the auth chain for the given event.
            A list of events in the state, not including the event itself.
        """
        (
            state_event_ids,

@@ -570,68 +574,53 @@ class FederationHandler(BaseHandler):
            destination, room_id, event_id=event_id
        )

        desired_events = set(state_event_ids + auth_event_ids)

        event_map = await self._get_events_from_store_or_dest(
            destination, room_id, desired_events
        )

        failed_to_fetch = desired_events - event_map.keys()
        if failed_to_fetch:
            logger.warning(
                "Failed to fetch missing state/auth events for %s %s",
                event_id,
                failed_to_fetch,
            )

        remote_state = [
            event_map[e_id] for e_id in state_event_ids if e_id in event_map
        ]

        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
        auth_chain.sort(key=lambda e: e.depth)

        return remote_state, auth_chain

    async def _get_events_from_store_or_dest(
        self, destination: str, room_id: str, event_ids: Iterable[str]
    ) -> Dict[str, EventBase]:
        """Fetch events from a remote destination, checking if we already have them.

        Persists any events we don't already have as outliers.

        If we fail to fetch any of the events, a warning will be logged, and the event
        will be omitted from the result. Likewise, any events which turn out not to
        be in the given room.

        This function *does not* automatically get missing auth events of the
        newly fetched events. Callers must include the full auth chain of
        the missing events in the `event_ids` argument, to ensure that any
        missing auth events are correctly fetched.

        Returns:
            map from event_id to event
        """
        fetched_events = await self.store.get_events(event_ids, allow_rejected=True)

        missing_events = set(event_ids) - fetched_events.keys()

        if missing_events:
            logger.debug(
                "Fetching unknown state/auth events %s for room %s",
                missing_events,
                room_id,
            )
        # Fetch the state events from the DB, and check we have the auth events.
        event_map = await self.store.get_events(state_event_ids, allow_rejected=True)
        auth_events_in_store = await self.store.have_seen_events(auth_event_ids)

        # Check for missing events. We handle state and auth event separately,
        # as we want to pull the state from the DB, but we don't for the auth
        # events. (Note: we likely won't use the majority of the auth chain, and
        # it can be *huge* for large rooms, so it's worth ensuring that we don't
        # unnecessarily pull it from the DB).
        missing_state_events = set(state_event_ids) - set(event_map)
        missing_auth_events = set(auth_event_ids) - set(auth_events_in_store)
        if missing_state_events or missing_auth_events:
            await self._get_events_and_persist(
                destination=destination, room_id=room_id, events=missing_events
                destination=destination,
                room_id=room_id,
                events=missing_state_events | missing_auth_events,
            )

            # we need to make sure we re-load from the database to get the rejected
            # state correct.
            fetched_events.update(
                (await self.store.get_events(missing_events, allow_rejected=True))
            )
            if missing_state_events:
                new_events = await self.store.get_events(
                    missing_state_events, allow_rejected=True
                )
                event_map.update(new_events)

                missing_state_events.difference_update(new_events)

                if missing_state_events:
                    logger.warning(
                        "Failed to fetch missing state events for %s %s",
                        event_id,
                        missing_state_events,
                    )

            if missing_auth_events:
                auth_events_in_store = await self.store.have_seen_events(
                    missing_auth_events
                )
                missing_auth_events.difference_update(auth_events_in_store)

                if missing_auth_events:
                    logger.warning(
                        "Failed to fetch missing auth events for %s %s",
                        event_id,
                        missing_auth_events,
                    )

        remote_state = list(event_map.values())

        # check for events which were in the wrong room.
        #

@@ -639,8 +628,8 @@ class FederationHandler(BaseHandler):
        # auth_events at an event in room A are actually events in room B

        bad_events = [
            (event_id, event.room_id)
            for event_id, event in fetched_events.items()
            (event.event_id, event.room_id)
            for event in remote_state
            if event.room_id != room_id
        ]

@@ -657,9 +646,10 @@ class FederationHandler(BaseHandler):
                room_id,
            )

            del fetched_events[bad_event_id]
        if bad_events:
            remote_state = [e for e in remote_state if e.room_id == room_id]

        return fetched_events
        return remote_state

    async def _get_state_after_missing_prev_event(
        self,

@@ -962,27 +952,23 @@ class FederationHandler(BaseHandler):

        # For each edge get the current state.

        auth_events = {}
        state_events = {}
        events_to_state = {}
        for e_id in edges:
            state, auth = await self._get_state_for_room(
            state = await self._get_state_for_room(
                destination=dest,
                room_id=room_id,
                event_id=e_id,
            )
            auth_events.update({a.event_id: a for a in auth})
            auth_events.update({s.event_id: s for s in state})
            state_events.update({s.event_id: s for s in state})
            events_to_state[e_id] = state

        required_auth = {
            a_id
            for event in events
            + list(state_events.values())
            + list(auth_events.values())
            for event in events + list(state_events.values())
            for a_id in event.auth_event_ids()
        }
        auth_events = await self.store.get_events(required_auth, allow_rejected=True)
        auth_events.update(
            {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
        )

@@ -1673,8 +1659,29 @@ class FederationHandler(BaseHandler):
        # would introduce the danger of backwards-compatibility problems.
        event.internal_metadata.send_on_behalf_of = origin

        # Calculate the event context.
        context = await self.state_handler.compute_event_context(event)
        context = await self._auth_and_persist_event(origin, event, context)

        # Get the state before the new event.
        prev_state_ids = await context.get_prev_state_ids()

        # Check if the user is already in the room or invited to the room.
        user_id = event.state_key
        prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
        prev_member_event = None
        if prev_member_event_id:
            prev_member_event = await self.store.get_event(prev_member_event_id)

        # Check if the member should be allowed access via membership in a space.
        await self._event_auth_handler.check_restricted_join_rules(
            prev_state_ids,
            event.room_version,
            user_id,
            prev_member_event,
        )

        # Persist the event.
        await self._auth_and_persist_event(origin, event, context)

        logger.debug(
            "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s",

@@ -1682,8 +1689,6 @@ class FederationHandler(BaseHandler):
            event.signatures,
        )

        prev_state_ids = await context.get_prev_state_ids()

        state_ids = list(prev_state_ids.values())
        auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)

@@ -2006,7 +2011,7 @@ class FederationHandler(BaseHandler):
        state: Optional[Iterable[EventBase]] = None,
        auth_events: Optional[MutableStateMap[EventBase]] = None,
        backfilled: bool = False,
    ) -> EventContext:
    ) -> None:
        """
        Process an event by performing auth checks and then persisting to the database.

@@ -2028,9 +2033,6 @@ class FederationHandler(BaseHandler):
                event is an outlier), may be the auth events claimed by the remote
                server.
            backfilled: True if the event was backfilled.

        Returns:
            The event context.
        """
        context = await self._check_event_auth(
            origin,

@@ -2060,8 +2062,6 @@ class FederationHandler(BaseHandler):
            )
            raise

        return context

    async def _auth_and_persist_events(
        self,
        origin: str,

@@ -2420,7 +2420,9 @@ class FederationHandler(BaseHandler):
        # If we are going to send this event over federation we precalculate
        # the joined hosts.
        if event.internal_metadata.get_send_on_behalf_of():
            await self.event_creation_handler.cache_joined_hosts_for_event(event)
            await self.event_creation_handler.cache_joined_hosts_for_event(
                event, context
            )

        return context

@@ -2956,7 +2958,7 @@ class FederationHandler(BaseHandler):
        try:
            # for each sig on the third_party_invite block of the actual invite
            for server, signature_block in signed["signatures"].items():
                for key_name, encoded_signature in signature_block.items():
                for key_name in signature_block.keys():
                    if not key_name.startswith("ed25519:"):
                        continue
Some files were not shown because too many files have changed in this diff.