Compare commits

116 Commits — comparing erikj/remo... with hs/use-mal...

Commits in this comparison (SHA1):

7ea2cd1ce9, 451f25172a, 91143bb24e, 47806b0869, a683028d81, 7562d887e1,
affaffb0ab, 63fb220e5f, 27c375f812, f4833e0c06, 28c6841102, 652a6b094d,
d1473f7362, dc6366a9bd, 86fb71431c, b378d98c8f, 7967b36efe, 03318a766c,
2b2985b5cf, 51065c44bb, 6c84778549, 765473567c, b65ecaff9b, 4df26abf28,
25f43faa70, 8771b1337d, eba431c539, a8803e2b6e, ac88aca7f7, 24f07a83e6,
70f0ffd2fc, d783880083, 37623e3382, e2a443550e, ef889c98a6, 1fb9a2d0bf,
de8f0a03a3, d0aee697ac, d5305000f1, e9eb3549d3, a61b13c0a1, 0644ac0989,
e3bc4617fc, b85821aca2, 56c4b47df3, 4d624f467a, d11f2dfee5, bb4b11846f,
e9444cc74d, 0085dc5abc, 802560211a, e4ab8676b4, 10a08ab88a, fa6679e794,
8ba086980d, 391bfe9a7b, 787de3190f, 4e0fd35bc9, dd2d32dcdb, fe604a022a,
1350b053da, 0ffa5fb935, 3ff2251754, 84936e2264, 695b73c861, 59d24c5bef,
e83627926f, a15c003e5b, ceaa76970f, 9d25a0ae65, d924827da1, 3853a7edfc,
51a20914a8, c1ddbbde4f, 177dae2704, 69018acbd2, 294c675033, 3186324260,
0f2629ebc6, dac4445934, 79e6d9e4b1, ca380881b1, 55159c48e3, ca6ecb8d67,
8798f2291c, 046175daba, 0c23aa393c, d9bd62f9d1, 4b2217ace2, a0972085ed,
bdb4c20dc1, acb8c81041, 98a1b84631, 026a66f2b3, a745531c10, 30c94862b4,
5d281c10dd, 683d6f75af, eccacd72cb, b8c5f6fddb, 272402c4d7, 05fa06834d,
913f790bb2, 6982db9651, 438a8594cb, e031c7e0cc, 0a88ec0a87, b076bc276e,
de0d088adc, db70435de7, 495b214f4f, 71f0623de9, e694a598f8, 2b7dd21655,
c571736c6c, 601b893352
.buildkite/scripts/create_postgres_db.py (deleted file)

@@ -1,36 +0,0 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from synapse.storage.engines import create_engine

logger = logging.getLogger("create_postgres_db")

if __name__ == "__main__":
    # Create a PostgresEngine.
    db_engine = create_engine({"name": "psycopg2", "args": {}})

    # Connect to postgres to create the base database.
    # We use "postgres" as a database because it's bound to exist and the "synapse" one
    # doesn't exist yet.
    db_conn = db_engine.module.connect(
        user="postgres", host="postgres", password="postgres", dbname="postgres"
    )
    db_conn.autocommit = True
    cur = db_conn.cursor()
    cur.execute("CREATE DATABASE synapse;")
    cur.close()
    db_conn.close()
31 .buildkite/scripts/postgres_exec.py (new executable file)

@@ -0,0 +1,31 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import psycopg2

# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.

# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
    user="postgres", host="postgres", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
    cur.execute(c)
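The helper executes each of its command-line arguments as a SQL statement against the `postgres` maintenance database. For example, the port-db test script below uses it to create and reset the test database:

```sh
# create the test database
./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"

# drop and re-create it between runs
./.buildkite/scripts/postgres_exec.py "DROP DATABASE synapse" "CREATE DATABASE synapse"
```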
@@ -1,10 +1,10 @@
#!/usr/bin/env bash
#
# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
# with additional dependencies needed for the test (such as coverage or the PostgreSQL
# driver), update the schema of the test SQLite database and run background updates on it,
# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to
# test porting the SQLite database to the PostgreSQL database (with coverage).
# Test script for 'synapse_port_db'.
# - sets up synapse and deps
# - runs the port script on a prepopulated test sqlite db
# - also runs it against a new sqlite db

set -xe
cd `dirname $0`/../..

@@ -22,15 +22,32 @@ echo "--- Generate the signing key"

# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml

echo "--- Prepare the databases"
echo "--- Prepare test database"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml

# Create the PostgreSQL database.
./.buildkite/scripts/create_postgres_db.py
./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db"

# Run the script
echo "+++ Run synapse_port_db against test database"
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml

#####

# Now do the same again, on an empty database.

echo "--- Prepare empty SQLite database"

# we do this by deleting the sqlite db, and then doing the same again.
rm .buildkite/test_db.db

scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml

# re-create the PostgreSQL database.
./.buildkite/scripts/postgres_exec.py \
  "DROP DATABASE synapse" \
  "CREATE DATABASE synapse"

echo "+++ Run synapse_port_db against empty database"
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
2 .github/workflows/tests.yml (vendored)

@@ -273,7 +273,7 @@ jobs:
          python-version: ${{ matrix.python-version }}
      - name: Patch Buildkite-specific test scripts
        run: |
          sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py
          sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
          sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
          sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
          sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
220 CHANGES.md
@@ -1,11 +1,227 @@
|
||||
Synapse 1.32.0rc1 (2021-04-13)
|
||||
Synapse 1.34.0rc1 (2021-05-12)
|
||||
==============================
|
||||
|
||||
This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting.
|
||||
|
||||
This release also deprecates the `POST /_synapse/admin/v1/rooms/<room_id>/delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/<room_id>` route instead.
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Add experimental option to track memory usage of the caches. ([\#9881](https://github.com/matrix-org/synapse/issues/9881))
|
||||
- Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
|
||||
- Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902))
|
||||
- Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951))
|
||||
- Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916))
|
||||
- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
|
||||
- Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935))
|
||||
- Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945))
|
||||
- Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. ([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954))
|
||||
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. ([\#9895](https://github.com/matrix-org/synapse/issues/9895))
|
||||
- Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
|
||||
- Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. ([\#9910](https://github.com/matrix-org/synapse/issues/9910))
|
||||
- Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. ([\#9928](https://github.com/matrix-org/synapse/issues/9928))
|
||||
- Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. ([\#9930](https://github.com/matrix-org/synapse/issues/9930))
|
||||
- Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. ([\#9961](https://github.com/matrix-org/synapse/issues/9961), [\#9965](https://github.com/matrix-org/synapse/issues/9965))
|
||||
- Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. ([\#9968](https://github.com/matrix-org/synapse/issues/9968))
|
||||
|
||||
|
||||
Updates to the Docker image
|
||||
---------------------------
|
||||
|
||||
- Add `startup_delay` to docker healthcheck to reduce waiting time for coming online and update the documentation with extra options. Contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913))
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Add `port` argument to the Postgres database sample config section. ([\#9911](https://github.com/matrix-org/synapse/issues/9911))
|
||||
|
||||
|
||||
Deprecations and Removals
|
||||
-------------------------
|
||||
|
||||
- Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Reduce the length of Synapse's access tokens. ([\#5588](https://github.com/matrix-org/synapse/issues/5588))
|
||||
- Export jemalloc stats to Prometheus if it is being used. ([\#9882](https://github.com/matrix-org/synapse/issues/9882))
|
||||
- Add type hints to presence handler. ([\#9885](https://github.com/matrix-org/synapse/issues/9885))
|
||||
- Reduce memory usage of the LRU caches. ([\#9886](https://github.com/matrix-org/synapse/issues/9886))
|
||||
- Add type hints to the `synapse.handlers` module. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
|
||||
- Time response time for external cache requests. ([\#9904](https://github.com/matrix-org/synapse/issues/9904))
|
||||
- Minor fixes to the `make_full_schema.sh` script. ([\#9931](https://github.com/matrix-org/synapse/issues/9931))
|
||||
- Move database schema files into a common directory. ([\#9932](https://github.com/matrix-org/synapse/issues/9932))
|
||||
- Add debug logging for lost/delayed to-device messages. ([\#9959](https://github.com/matrix-org/synapse/issues/9959))
|
||||
|
||||
|
||||
Synapse 1.33.2 (2021-05-11)
|
||||
===========================
|
||||
|
||||
Due to the security issue highlighted below, server administrators are encouraged to update Synapse. We are not aware of these vulnerabilities being exploited in the wild.
|
||||
|
||||
Security advisory
|
||||
-----------------
|
||||
|
||||
This release fixes a denial of service attack ([CVE-2021-29471](https://github.com/matrix-org/synapse/security/advisories/GHSA-x345-32rc-8h85)) against Synapse's push rules implementation. Server admins are encouraged to upgrade.
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Unpin attrs dependency. ([\#9946](https://github.com/matrix-org/synapse/issues/9946))
|
||||
|
||||
|
||||
Synapse 1.33.1 (2021-05-06)
|
||||
===========================
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. ([\#9937](https://github.com/matrix-org/synapse/issues/9937))
|
||||
|
||||
|
||||
Synapse 1.33.0 (2021-05-05)
|
||||
===========================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). ([\#9909](https://github.com/matrix-org/synapse/issues/9909))
|
||||
|
||||
|
||||
Synapse 1.33.0rc2 (2021-04-29)
|
||||
==============================
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix tight loop when handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900))
|
||||
|
||||
|
||||
Synapse 1.33.0rc1 (2021-04-28)
|
||||
==============================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9800](https://github.com/matrix-org/synapse/issues/9800), [\#9814](https://github.com/matrix-org/synapse/issues/9814))
|
||||
- Add experimental support for handling presence on a worker. ([\#9819](https://github.com/matrix-org/synapse/issues/9819), [\#9820](https://github.com/matrix-org/synapse/issues/9820), [\#9828](https://github.com/matrix-org/synapse/issues/9828), [\#9850](https://github.com/matrix-org/synapse/issues/9850))
|
||||
- Return a new template when a user attempts to renew their account multiple times with the same token, stating that their account is set to expire. This replaces the invalid token template that would previously be shown in this case. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832))
|
||||
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. ([\#9726](https://github.com/matrix-org/synapse/issues/9726))
|
||||
- Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. ([\#9788](https://github.com/matrix-org/synapse/issues/9788))
|
||||
- Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. ([\#9802](https://github.com/matrix-org/synapse/issues/9802))
|
||||
- Limit the size of HTTP responses read over federation. ([\#9833](https://github.com/matrix-org/synapse/issues/9833))
|
||||
- Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. ([\#9867](https://github.com/matrix-org/synapse/issues/9867))
|
||||
- Fix a long-standing bug where errors from federation did not propagate to the client. ([\#9868](https://github.com/matrix-org/synapse/issues/9868))
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. ([\#9801](https://github.com/matrix-org/synapse/issues/9801))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Add a dockerfile for running Synapse in worker-mode under Complement. ([\#9162](https://github.com/matrix-org/synapse/issues/9162))
|
||||
- Apply `pyupgrade` across the codebase. ([\#9786](https://github.com/matrix-org/synapse/issues/9786))
|
||||
- Move some replication processing out of `generic_worker`. ([\#9796](https://github.com/matrix-org/synapse/issues/9796))
|
||||
- Replace `HomeServer.get_config()` with inline references. ([\#9815](https://github.com/matrix-org/synapse/issues/9815))
|
||||
- Rename some handlers and config modules to not duplicate the top-level module. ([\#9816](https://github.com/matrix-org/synapse/issues/9816))
|
||||
- Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. ([\#9817](https://github.com/matrix-org/synapse/issues/9817))
|
||||
- Reduce CPU usage of the user directory by reusing existing calculated room membership. ([\#9821](https://github.com/matrix-org/synapse/issues/9821))
|
||||
- Small speed up for joining large remote rooms. ([\#9825](https://github.com/matrix-org/synapse/issues/9825))
|
||||
- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9838](https://github.com/matrix-org/synapse/issues/9838))
|
||||
- Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. ([\#9845](https://github.com/matrix-org/synapse/issues/9845))
|
||||
- Limit length of accepted email addresses. ([\#9855](https://github.com/matrix-org/synapse/issues/9855))
|
||||
- Remove redundant `synapse.types.Collection` type definition. ([\#9856](https://github.com/matrix-org/synapse/issues/9856))
|
||||
- Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. ([\#9858](https://github.com/matrix-org/synapse/issues/9858))
|
||||
- Disable invite rate-limiting by default when running the unit tests. ([\#9871](https://github.com/matrix-org/synapse/issues/9871))
|
||||
- Pass a reactor into `SynapseSite` to make testing easier. ([\#9874](https://github.com/matrix-org/synapse/issues/9874))
|
||||
- Make `DomainSpecificString` an `attrs` class. ([\#9875](https://github.com/matrix-org/synapse/issues/9875))
|
||||
- Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. ([\#9876](https://github.com/matrix-org/synapse/issues/9876))
|
||||
- Remove redundant `_PushHTTPChannel` test class. ([\#9878](https://github.com/matrix-org/synapse/issues/9878))
|
||||
- Remove backwards-compatibility code for Python versions < 3.6. ([\#9879](https://github.com/matrix-org/synapse/issues/9879))
|
||||
- Small performance improvement around handling new local presence updates. ([\#9887](https://github.com/matrix-org/synapse/issues/9887))
|
||||
|
||||
|
||||
Synapse 1.32.2 (2021-04-22)
|
||||
===========================
|
||||
|
||||
This release includes a fix for a regression introduced in 1.32.0.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a regression in Synapse 1.32.0 and 1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857))
|
||||
|
||||
|
||||
Synapse 1.32.1 (2021-04-21)
|
||||
===========================
|
||||
|
||||
This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853)
|
||||
in Synapse 1.32.0 that caused connected Prometheus instances to become unstable.
|
||||
|
||||
However, as this release is still subject to the `LoggingContext` change in 1.32.0,
|
||||
it is recommended to remain on or downgrade to 1.31.0.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a regression in Synapse 1.32.0 which caused Synapse to report large numbers of Prometheus time series, potentially overwhelming Prometheus instances. ([\#9854](https://github.com/matrix-org/synapse/issues/9854))
|
||||
|
||||
|
||||
Synapse 1.32.0 (2021-04-20)
|
||||
===========================
|
||||
|
||||
**Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853)
|
||||
that can overwhelm connected Prometheus instances. This issue was not present in
|
||||
1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and
|
||||
follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183)
|
||||
to clean up any excess writeahead logs.
|
||||
|
||||
**Note:** This release also mistakenly included a change that may affect Synapse
modules that import `synapse.logging.context.LoggingContext`, such as
[synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider).
This will be fixed in a later Synapse version.
|
||||
|
||||
**Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+.
|
||||
|
||||
This release removes the deprecated `GET /_synapse/admin/v1/users/<user_id>` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities.
|
||||
|
||||
This release requires Application Services to use type `m.login.application_services` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
|
||||
This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
|
||||
|
||||
If you are using the `packages.matrix.org` Debian repository for Synapse packages,
|
||||
note that we have recently updated the expiry date on the gpg signing key. If you see an
|
||||
error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you
|
||||
will need to get a fresh copy of the keys. You can do so with:
|
||||
|
||||
```sh
|
||||
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
|
||||
```
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix the log lines of nested logging contexts. Broke in 1.32.0rc1. ([\#9829](https://github.com/matrix-org/synapse/issues/9829))
|
||||
|
||||
|
||||
Synapse 1.32.0rc1 (2021-04-13)
|
||||
==============================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
82 UPGRADE.rst
@@ -85,9 +85,81 @@ for example:

    wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

Upgrading to v1.34.0
====================

``room_invite_state_types`` configuration setting
--------------------------------------------------

The ``room_invite_state_types`` configuration setting has been deprecated and
replaced with ``room_prejoin_state``. See the `sample configuration file <https://github.com/matrix-org/synapse/blob/v1.34.0/docs/sample_config.yaml#L1515>`_.

If you have set ``room_invite_state_types`` to the default value you should simply
remove it from your configuration file. The default value used to be:

.. code:: yaml

   room_invite_state_types:
      - "m.room.join_rules"
      - "m.room.canonical_alias"
      - "m.room.avatar"
      - "m.room.encryption"
      - "m.room.name"

If you have customised this value by adding additional state types, you should
remove ``room_invite_state_types`` and configure ``additional_event_types`` with
your customisations.

If you have customised this value by removing state types, you should rename
``room_invite_state_types`` to ``additional_event_types``, and set
``disable_default_event_types`` to ``true``.

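For example, a configuration that had dropped ``m.room.avatar`` from the default
list would translate to something like the following (a sketch based on the option
names above; substitute your own event types):

.. code:: yaml

   room_prejoin_state:
      disable_default_event_types: true
      additional_event_types:
         - "m.room.join_rules"
         - "m.room.canonical_alias"
         - "m.room.encryption"
         - "m.room.name"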
Upgrading to v1.33.0
|
||||
====================
|
||||
|
||||
Account Validity HTML templates can now display a user's expiration date
|
||||
------------------------------------------------------------------------
|
||||
|
||||
This may affect you if you have enabled the account validity feature, and have made use of a
|
||||
custom HTML template specified by the ``account_validity.template_dir`` or ``account_validity.account_renewed_html_path``
|
||||
Synapse config options.
|
||||
|
||||
The template can now accept an ``expiration_ts`` variable, which represents the unix timestamp in milliseconds for the
|
||||
future date until which their account has been renewed. See the
|
||||
`default template <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_renewed.html>`_
|
||||
for an example of usage.
|
||||
|
||||
Also note that a new HTML template, ``account_previously_renewed.html``, has been added. This is shown to users
|
||||
when they attempt to renew their account with a valid renewal token that has already been used before. The default
|
||||
template contents can be found
|
||||
`here <https://github.com/matrix-org/synapse/blob/release-v1.33.0/synapse/res/templates/account_previously_renewed.html>`_,
|
||||
and can also accept an ``expiration_ts`` variable. This template replaces the error message users would previously see
|
||||
upon attempting to use a valid renewal token more than once.
|
||||
|
||||
|
||||
Upgrading to v1.32.0
|
||||
====================
|
||||
|
||||
Regression causing connected Prometheus instances to become overwhelmed
|
||||
-----------------------------------------------------------------------
|
||||
|
||||
This release introduces `a regression <https://github.com/matrix-org/synapse/issues/9853>`_
|
||||
that can overwhelm connected Prometheus instances. This issue is not present in
|
||||
Synapse v1.32.0rc1.
|
||||
|
||||
If you have been affected, please downgrade to 1.31.0. You then may need to
|
||||
remove excess writeahead logs in order for Prometheus to recover. Instructions
|
||||
for doing so are provided
|
||||
`here <https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183>`_.
|
||||
|
||||
Dropping support for old Python, Postgres and SQLite versions
|
||||
-------------------------------------------------------------
|
||||
|
||||
In line with our `deprecation policy <https://github.com/matrix-org/synapse/blob/release-v1.32.0/docs/deprecation_policy.md>`_,
|
||||
we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream.
|
||||
|
||||
This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+.
|
||||
|
||||
Removal of old List Accounts Admin API
|
||||
--------------------------------------
|
||||
|
||||
@@ -98,6 +170,16 @@ has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``G
|
||||
|
||||
The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
|
||||
|
||||
Application Services must use type ``m.login.application_service`` when registering users
|
||||
-----------------------------------------------------------------------------------------
|
||||
|
||||
In compliance with the
|
||||
`Application Service spec <https://matrix.org/docs/spec/application_service/r0.1.2#server-admin-style-permissions>`_,
|
||||
Application Services are now required to use the ``m.login.application_service`` type when registering users via the
|
||||
``/_matrix/client/r0/register`` endpoint. This behaviour was deprecated in Synapse v1.30.0.
|
||||
|
||||
Please ensure your Application Services are up to date.
|
||||
|
||||
Upgrading to v1.29.0
|
||||
====================
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
Add a dockerfile for running Synapse in worker-mode under Complement.

@@ -1 +0,0 @@
Speed up federation transmission by using fewer database calls. Contributed by @ShadowJonathan.

@@ -1 +0,0 @@
Apply `pyupgrade` across the codebase.

@@ -1 +0,0 @@
Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg.

@@ -1 +0,0 @@
Move some replication processing out of `generic_worker`.

@@ -1 +0,0 @@
Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership.

@@ -1 +0,0 @@
Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms.

@@ -1 +0,0 @@
Replace `HomeServer.get_config()` with inline references.

@@ -1 +0,0 @@
Add experimental support for handling presence on a worker.
1 changelog.d/xxxx.misc (new file)

@@ -0,0 +1 @@
Ensure python uses `malloc` when running Synapse in Docker.
@@ -224,16 +224,14 @@ class HomeServer(ReplicationHandler):
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
try:
|
||||
yield self.replication_layer.send_pdus(
|
||||
[
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
pdu_type="sy.room.message",
|
||||
content={"sender": sender, "body": body},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
]
|
||||
yield self.replication_layer.send_pdu(
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
pdu_type="sy.room.message",
|
||||
content={"sender": sender, "body": body},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
@@ -255,7 +253,7 @@ class HomeServer(ReplicationHandler):
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
yield self.replication_layer.send_pdus([pdu])
|
||||
yield self.replication_layer.send_pdu(pdu)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
@@ -267,18 +265,16 @@ class HomeServer(ReplicationHandler):
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
try:
|
||||
yield self.replication_layer.send_pdus(
|
||||
[
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
is_state=True,
|
||||
pdu_type="sy.room.member",
|
||||
state_key=invitee,
|
||||
content={"membership": "invite"},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
]
|
||||
yield self.replication_layer.send_pdu(
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
is_state=True,
|
||||
pdu_type="sy.room.member",
|
||||
state_key=invitee,
|
||||
content={"membership": "invite"},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
38 debian/changelog (vendored)
@@ -1,8 +1,42 @@
|
||||
matrix-synapse-py3 (1.31.0+nmu1) UNRELEASED; urgency=medium
|
||||
matrix-synapse-py3 (1.33.2) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.33.2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 11 May 2021 11:17:59 +0100
|
||||
|
||||
matrix-synapse-py3 (1.33.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.33.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 06 May 2021 14:06:33 +0100
|
||||
|
||||
matrix-synapse-py3 (1.33.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.33.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 05 May 2021 14:15:27 +0100
|
||||
|
||||
matrix-synapse-py3 (1.32.2) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.32.2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 22 Apr 2021 12:43:52 +0100
|
||||
|
||||
matrix-synapse-py3 (1.32.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.32.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 21 Apr 2021 14:00:55 +0100
|
||||
|
||||
matrix-synapse-py3 (1.32.0) stable; urgency=medium
|
||||
|
||||
[ Dan Callahan ]
|
||||
* Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
|
||||
|
||||
-- Dan Callahan <danc@element.io> Mon, 12 Apr 2021 13:07:36 +0000
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.32.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Apr 2021 14:28:39 +0100
|
||||
|
||||
matrix-synapse-py3 (1.31.0) stable; urgency=medium
|
||||
|
||||
|
||||
@@ -96,18 +96,48 @@ for port in 8080 8081 8082; do
|
||||
# Check script parameters
|
||||
if [ $# -eq 1 ]; then
|
||||
if [ $1 = "--no-rate-limit" ]; then
|
||||
# messages rate limit
|
||||
echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config
|
||||
echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config
|
||||
|
||||
# registration rate limit
|
||||
printf 'rc_registration:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||
|
||||
# login rate limit
|
||||
echo 'rc_login:' >> $DIR/etc/$port.config
|
||||
printf ' address:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||
printf ' account:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||
printf ' failed_attempts:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
|
||||
# Disable any rate limiting
|
||||
ratelimiting=$(cat <<-RC
|
||||
rc_message:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
rc_registration:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
rc_login:
|
||||
address:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
account:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
failed_attempts:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
rc_admin_redaction:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
rc_joins:
|
||||
local:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
remote:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
rc_3pid_validation:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
rc_invites:
|
||||
per_room:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
per_user:
|
||||
per_second: 1000
|
||||
burst_count: 1000
|
||||
RC
|
||||
)
|
||||
echo "${ratelimiting}" >> $DIR/etc/$port.config
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@@ -88,5 +88,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
|
||||
|
||||
ENTRYPOINT ["/start.py"]
|
||||
|
||||
HEALTHCHECK --interval=1m --timeout=5s \
|
||||
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
|
||||
@@ -191,6 +191,16 @@ whilst running the above `docker run` commands.
|
||||
```
|
||||
--no-healthcheck
|
||||
```
|
||||
|
||||
## Disabling the healthcheck in docker-compose file
|
||||
|
||||
If you wish to disable the healthcheck via docker-compose, append the following to your service configuration.
|
||||
|
||||
```
|
||||
healthcheck:
|
||||
disable: true
|
||||
```
|
||||
|
||||
## Setting custom healthcheck on docker run
|
||||
|
||||
If you wish to point the healthcheck at a different port with docker command, add the following
|
||||
@@ -202,14 +212,15 @@ If you wish to point the healthcheck at a different port with docker command, ad
|
||||
## Setting the healthcheck in docker-compose file
|
||||
|
||||
You can add the following to set a custom healthcheck in a docker compose file.
|
||||
You will need version >2.1 for this to work.
|
||||
You will need docker-compose version >2.1 for this to work.
|
||||
|
||||
```
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
|
||||
interval: 1m
|
||||
timeout: 10s
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
```
|
||||
|
||||
## Using jemalloc
|
||||
|
||||
@@ -218,6 +218,7 @@ def main(args, environ):

    if os.path.isfile(jemallocpath):
        environ["LD_PRELOAD"] = jemallocpath
        environ["PYTHONMALLOC"] = "malloc"
    else:
        log("Could not find %s, will not use" % (jemallocpath,))
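Setting `PYTHONMALLOC=malloc` makes CPython route its object allocations through the system `malloc` rather than its internal pymalloc arenas, so the preloaded jemalloc actually serves them. Outside the container, the equivalent invocation would look roughly like this (the jemalloc path is illustrative and varies by distribution):

```sh
# Illustrative: run Synapse with jemalloc handling all allocations
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 \
PYTHONMALLOC=malloc \
python -m synapse.app.homeserver -c /data/homeserver.yaml
```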
@@ -427,7 +427,7 @@ the new room. Users on other servers will be unaffected.

The API is:

```
POST /_synapse/admin/v1/rooms/<room_id>/delete
DELETE /_synapse/admin/v1/rooms/<room_id>
```

with a body of:

@@ -528,6 +528,15 @@ You will have to manually handle, if you so choose, the following:

* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.

## Deprecated endpoint

The previous deprecated API will be removed in a future release; it was:

```
POST /_synapse/admin/v1/rooms/<room_id>/delete
```

It behaves the same way as the current endpoint, except for the path and the method.

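As an illustration of the switch (the server URL, room ID, access token, and the `purge` body field here are placeholders, not taken from this page), the same request moves from a POST on the old route to a DELETE on the room itself:

```sh
# Old, deprecated route
curl -X POST -H "Authorization: Bearer <access_token>" \
    -d '{"purge": true}' \
    "http://localhost:8008/_synapse/admin/v1/rooms/<room_id>/delete"

# New route: same body, HTTP DELETE on the room itself
curl -X DELETE -H "Authorization: Bearer <access_token>" \
    -d '{"purge": true}' \
    "http://localhost:8008/_synapse/admin/v1/rooms/<room_id>"
```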
# Make Room Admin API
|
||||
|
||||
|
||||
@@ -152,6 +152,16 @@ presence:
|
||||
#
|
||||
#gc_thresholds: [700, 10, 10]
|
||||
|
||||
# The minimum time in seconds between each GC for a generation, regardless of
|
||||
# the GC thresholds. This ensures that we don't do GC too frequently.
|
||||
#
|
||||
# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
|
||||
# generation 0 GCs, etc.
|
||||
#
|
||||
# Defaults to `[1s, 10s, 30s]`.
|
||||
#
|
||||
#gc_min_interval: [0.5s, 30s, 1m]
|
||||
|
||||
# Set the limit on the returned events in the timeline in the get
|
||||
# and sync operations. The default value is 100. -1 means no upper limit.
|
||||
#
|
||||
@@ -731,6 +741,12 @@ acme:
|
||||
#
|
||||
#allow_profile_lookup_over_federation: false
|
||||
|
||||
# Uncomment to disable device display name lookup over federation. By default, the
|
||||
# Federation API allows other homeservers to obtain device display names of any user
|
||||
# on this homeserver. Defaults to 'true'.
|
||||
#
|
||||
#allow_device_name_lookup_over_federation: false
|
||||
|
||||
|
||||
## Caching ##
|
||||
|
||||
@@ -810,6 +826,7 @@ caches:
|
||||
# password: secretpassword
|
||||
# database: synapse
|
||||
# host: localhost
|
||||
# port: 5432
|
||||
# cp_min: 5
|
||||
# cp_max: 10
|
||||
#
|
||||
@@ -1175,69 +1192,6 @@ url_preview_accept_language:
|
||||
#
|
||||
#enable_registration: false
|
||||
|
||||
# Optional account validity configuration. This allows for accounts to be denied
|
||||
# any request after a given period.
|
||||
#
|
||||
# Once this feature is enabled, Synapse will look for registered users without an
|
||||
# expiration date at startup and will add one to every account it found using the
|
||||
# current settings at that time.
|
||||
# This means that, if a validity period is set, and Synapse is restarted (it will
|
||||
# then derive an expiration date from the current validity period), and some time
|
||||
# after that the validity period changes and Synapse is restarted, the users'
|
||||
# expiration dates won't be updated unless their account is manually renewed. This
|
||||
# date will be randomly selected within a range [now + period - d ; now + period],
|
||||
# where d is equal to 10% of the validity period.
|
||||
#
|
||||
account_validity:
|
||||
# The account validity feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# The period after which an account is valid after its registration. When
|
||||
# renewing the account, its validity period will be extended by this amount
|
||||
# of time. This parameter is required when using the account validity
|
||||
# feature.
|
||||
#
|
||||
#period: 6w
|
||||
|
||||
# The amount of time before an account's expiry date at which Synapse will
|
||||
# send an email to the account's email address with a renewal link. By
|
||||
# default, no such emails are sent.
|
||||
#
|
||||
# If you enable this setting, you will also need to fill out the 'email' and
|
||||
# 'public_baseurl' configuration sections.
|
||||
#
|
||||
#renew_at: 1w
|
||||
|
||||
# The subject of the email sent out with the renewal link. '%(app)s' can be
|
||||
# used as a placeholder for the 'app_name' parameter from the 'email'
|
||||
# section.
|
||||
#
|
||||
# Note that the placeholder must be written '%(app)s', including the
|
||||
# trailing 's'.
|
||||
#
|
||||
# If this is not set, a default value is used.
|
||||
#
|
||||
#renew_email_subject: "Renew your %(app)s account"
|
||||
|
||||
# Directory in which Synapse will try to find templates for the HTML files to
|
||||
# serve to the user when trying to renew an account. If not set, default
|
||||
# templates from within the Synapse package will be used.
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
# File within 'template_dir' giving the HTML to be displayed to the user after
|
||||
# they successfully renewed their account. If not set, default text is used.
|
||||
#
|
||||
#account_renewed_html_path: "account_renewed.html"
|
||||
|
||||
# File within 'template_dir' giving the HTML to be displayed when the user
|
||||
# tries to renew an account with an invalid renewal token. If not set,
|
||||
# default text is used.
|
||||
#
|
||||
#invalid_token_html_path: "invalid_token.html"
|
||||
|
||||
# Time that a user's session remains valid for, after they log in.
|
||||
#
|
||||
# Note that this is not currently compatible with guest logins.
|
||||
@@ -1432,6 +1386,91 @@ account_threepid_delegates:
|
||||
#auto_join_rooms_for_guests: false
|
||||
|
||||
|
||||
## Account Validity ##
|
||||
|
||||
# Optional account validity configuration. This allows for accounts to be denied
|
||||
# any request after a given period.
|
||||
#
|
||||
# Once this feature is enabled, Synapse will look for registered users without an
|
||||
# expiration date at startup and will add one to every account it found using the
|
||||
# current settings at that time.
|
||||
# This means that, if a validity period is set, and Synapse is restarted (it will
|
||||
# then derive an expiration date from the current validity period), and some time
|
||||
# after that the validity period changes and Synapse is restarted, the users'
|
||||
# expiration dates won't be updated unless their account is manually renewed. This
|
||||
# date will be randomly selected within a range [now + period - d ; now + period],
|
||||
# where d is equal to 10% of the validity period.
|
||||
#
|
||||
account_validity:
|
||||
# The account validity feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# The period after which an account is valid after its registration. When
|
||||
# renewing the account, its validity period will be extended by this amount
|
||||
# of time. This parameter is required when using the account validity
|
||||
# feature.
|
||||
#
|
||||
#period: 6w
|
||||
|
||||
# The amount of time before an account's expiry date at which Synapse will
|
||||
# send an email to the account's email address with a renewal link. By
|
||||
# default, no such emails are sent.
|
||||
#
|
||||
# If you enable this setting, you will also need to fill out the 'email' and
|
||||
# 'public_baseurl' configuration sections.
|
||||
#
|
||||
#renew_at: 1w
|
||||
|
||||
# The subject of the email sent out with the renewal link. '%(app)s' can be
|
||||
# used as a placeholder for the 'app_name' parameter from the 'email'
|
||||
# section.
|
||||
#
|
||||
# Note that the placeholder must be written '%(app)s', including the
|
||||
# trailing 's'.
|
||||
#
|
||||
# If this is not set, a default value is used.
|
||||
#
|
||||
#renew_email_subject: "Renew your %(app)s account"
|
||||
|
||||
# Directory in which Synapse will try to find templates for the HTML files to
|
||||
# serve to the user when trying to renew an account. If not set, default
|
||||
# templates from within the Synapse package will be used.
|
||||
#
|
||||
# The currently available templates are:
|
||||
#
|
||||
# * account_renewed.html: Displayed to the user after they have successfully
|
||||
# renewed their account.
|
||||
#
|
||||
# * account_previously_renewed.html: Displayed to the user if they attempt to
|
||||
# renew their account with a token that is valid, but that has already
|
||||
# been used. In this case the account is not renewed again.
|
||||
#
|
||||
# * invalid_token.html: Displayed to the user when they try to renew an account
|
||||
# with an unknown or invalid renewal token.
|
||||
#
|
||||
# See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for
|
||||
# default template contents.
|
||||
#
|
||||
# The file name of some of these templates can be configured below for legacy
|
||||
# reasons.
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
# A custom file name for the 'account_renewed.html' template.
|
||||
#
|
||||
# If not set, the file is assumed to be named "account_renewed.html".
|
||||
#
|
||||
#account_renewed_html_path: "account_renewed.html"
|
||||
|
||||
# A custom file name for the 'invalid_token.html' template.
|
||||
#
|
||||
# If not set, the file is assumed to be named "invalid_token.html".
|
||||
#
|
||||
#invalid_token_html_path: "invalid_token.html"
|
||||
|
||||
|
||||
## Metrics ###
|
||||
|
||||
# Enable collection and rendering of performance metrics
|
||||
@@ -1482,6 +1521,7 @@ room_prejoin_state:
|
||||
# - m.room.avatar
|
||||
# - m.room.encryption
|
||||
# - m.room.name
|
||||
# - m.room.create
|
||||
#
|
||||
# Uncomment the following to disable these defaults (so that only the event
|
||||
# types listed in 'additional_event_types' are shared). Defaults to 'false'.
|
||||
@@ -1878,7 +1918,7 @@ saml2_config:
|
||||
# sub-properties:
|
||||
#
|
||||
# module: The class name of a custom mapping module. Default is
|
||||
# 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
|
||||
# 'synapse.handlers.oidc.JinjaOidcMappingProvider'.
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
|
||||
# for information on implementing a custom mapping provider.
|
||||
#
|
||||
|
||||
@@ -106,7 +106,7 @@ A custom mapping provider must specify the following methods:
|
||||
|
||||
Synapse has a built-in OpenID mapping provider if a custom provider isn't
|
||||
specified in the config. It is located at
|
||||
[`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`](../synapse/handlers/oidc_handler.py).
|
||||
[`synapse.handlers.oidc.JinjaOidcMappingProvider`](../synapse/handlers/oidc.py).
|
||||
|
||||
## SAML Mapping Providers
|
||||
|
||||
@@ -190,4 +190,4 @@ A custom mapping provider must specify the following methods:
|
||||
|
||||
Synapse has a built-in SAML mapping provider if a custom provider isn't
|
||||
specified in the config. It is located at
|
||||
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
|
||||
[`synapse.handlers.saml.DefaultSamlMappingProvider`](../synapse/handlers/saml.py).
|
||||
|
||||
@@ -216,6 +216,10 @@ Asks the server for the current position of all streams.
|
||||
|
||||
This is used when a worker is shutting down.
|
||||
|
||||
#### FEDERATION_ACK (C)
|
||||
|
||||
Acknowledge receipt of some federation data
|
||||
|
||||
### REMOTE_SERVER_UP (S, C)
|
||||
|
||||
Inform other processes that a remote server may have come back online.
|
||||
|
||||
4 mypy.ini
@@ -41,7 +41,6 @@ files =
|
||||
synapse/push,
|
||||
synapse/replication,
|
||||
synapse/rest,
|
||||
synapse/secrets.py,
|
||||
synapse/server.py,
|
||||
synapse/server_notices,
|
||||
synapse/spam_checker_api,
|
||||
@@ -172,3 +171,6 @@ ignore_missing_imports = True
|
||||
|
||||
[mypy-txacme.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-pympler.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
@@ -21,9 +21,10 @@ DISTS = (
|
||||
"debian:buster",
|
||||
"debian:bullseye",
|
||||
"debian:sid",
|
||||
"ubuntu:bionic",
|
||||
"ubuntu:focal",
|
||||
"ubuntu:groovy",
|
||||
"ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:groovy", # 20.10 (EOL 2021-07-07)
|
||||
"ubuntu:hirsute", # 21.04 (EOL 2022-01-05)
|
||||
)
|
||||
|
||||
DESC = '''\
|
||||
|
||||
@@ -140,7 +140,7 @@ if __name__ == "__main__":
|
||||
|
||||
definitions = {}
|
||||
for directory in args.directories:
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for root, _, files in os.walk(directory):
|
||||
for filename in files:
|
||||
if filename.endswith(".py"):
|
||||
filepath = os.path.join(root, filename)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python2
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sys
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ args = parser.parse_args()
|
||||
|
||||
|
||||
for directory in args.directories:
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for root, _, files in os.walk(directory):
|
||||
for filename in files:
|
||||
if filename.endswith(".py"):
|
||||
filepath = os.path.join(root, filename)
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
# It does so by having Synapse generate an up-to-date SQLite DB, then running
|
||||
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
|
||||
|
||||
POSTGRES_HOST="localhost"
|
||||
export PGHOST="localhost"
|
||||
POSTGRES_DB_NAME="synapse_full_schema.$$"
|
||||
|
||||
SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"
|
||||
@@ -32,7 +32,7 @@ usage() {
|
||||
while getopts "p:co:h" opt; do
|
||||
case $opt in
|
||||
p)
|
||||
POSTGRES_USERNAME=$OPTARG
|
||||
export PGUSER=$OPTARG
|
||||
;;
|
||||
c)
|
||||
# Print all commands that are being executed
|
||||
@@ -69,7 +69,7 @@ if [ ${#unsatisfied_requirements} -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$POSTGRES_USERNAME" ]; then
|
||||
if [ -z "$PGUSER" ]; then
|
||||
echo "No postgres username supplied"
|
||||
usage
|
||||
exit 1
|
||||
@@ -84,8 +84,9 @@ fi
|
||||
# Create the output directory if it doesn't exist
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD
|
||||
read -rsp "Postgres password for '$PGUSER': " PGPASSWORD
|
||||
echo ""
|
||||
export PGPASSWORD
|
||||
|
||||
# Exit immediately if a command fails
|
||||
set -e
|
||||
@@ -131,9 +132,9 @@ report_stats: false
|
||||
database:
|
||||
name: "psycopg2"
|
||||
args:
|
||||
user: "$POSTGRES_USERNAME"
|
||||
host: "$POSTGRES_HOST"
|
||||
password: "$POSTGRES_PASSWORD"
|
||||
user: "$PGUSER"
|
||||
host: "$PGHOST"
|
||||
password: "$PGPASSWORD"
|
||||
database: "$POSTGRES_DB_NAME"
|
||||
|
||||
# Suppress the key server warning.
|
||||
@@ -150,7 +151,7 @@ scripts-dev/update_database --database-config "$SQLITE_CONFIG"
|
||||
|
||||
# Create the PostgreSQL database.
|
||||
echo "Creating postgres database..."
|
||||
createdb $POSTGRES_DB_NAME
|
||||
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
|
||||
|
||||
echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
|
||||
if [ -z "$COVERAGE" ]; then
|
||||
@@ -181,7 +182,7 @@ DROP TABLE user_directory_search_docsize;
|
||||
DROP TABLE user_directory_search_stat;
|
||||
"
|
||||
sqlite3 "$SQLITE_DB" <<< "$SQL"
|
||||
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"
|
||||
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"
|
||||
|
||||
echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
|
||||
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"
|
||||
|
||||
@@ -634,8 +634,11 @@ class Porter(object):
|
||||
"device_inbox_sequence", ("device_inbox", "device_federation_outbox")
|
||||
)
|
||||
await self._setup_sequence(
|
||||
"account_data_sequence", ("room_account_data", "room_tags_revisions", "account_data"))
|
||||
await self._setup_sequence("receipts_sequence", ("receipts_linearized", ))
|
||||
"account_data_sequence",
|
||||
("room_account_data", "room_tags_revisions", "account_data"),
|
||||
)
|
||||
await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
|
||||
await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
|
||||
await self._setup_auth_chain_sequence()
|
||||
|
||||
# Step 3. Get tables.
|
||||
@@ -910,10 +913,11 @@ class Porter(object):
|
||||
(curr_forward_id + 1,),
|
||||
)
|
||||
|
||||
txn.execute(
|
||||
"ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s",
|
||||
(curr_backward_id + 1,),
|
||||
)
|
||||
if curr_backward_id:
|
||||
txn.execute(
|
||||
"ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s",
|
||||
(curr_backward_id + 1,),
|
||||
)
|
||||
|
||||
await self.postgres_store.db_pool.runInteraction(
|
||||
"_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
|
||||
@@ -951,10 +955,11 @@ class Porter(object):
|
||||
(curr_chain_id,),
|
||||
)
|
||||
|
||||
await self.postgres_store.db_pool.runInteraction(
|
||||
"_setup_event_auth_chain_id", r,
|
||||
)
|
||||
|
||||
if curr_chain_id is not None:
|
||||
await self.postgres_store.db_pool.runInteraction(
|
||||
"_setup_event_auth_chain_id",
|
||||
r,
|
||||
)
|
||||
|
||||
|
||||
##############################################
|
||||
|
||||
@@ -18,8 +18,7 @@ ignore =
|
||||
# E203: whitespace before ':' (which is contrary to pep8?)
|
||||
# E731: do not assign a lambda expression, use a def
|
||||
# E501: Line too long (black enforces this for us)
|
||||
# B007: Subsection of the bugbear suite (TODO: add in remaining fixes)
|
||||
ignore=W503,W504,E203,E731,E501,B007
|
||||
ignore=W503,W504,E203,E731,E501
|
||||
|
||||
[isort]
|
||||
line_length = 88
|
||||
|
||||
@@ -21,8 +21,8 @@ import os
|
||||
import sys
|
||||
|
||||
# Check that we're not running on an unsupported Python version.
|
||||
if sys.version_info < (3, 5):
|
||||
print("Synapse requires Python 3.5 or above.")
|
||||
if sys.version_info < (3, 6):
|
||||
print("Synapse requires Python 3.6 or above.")
|
||||
sys.exit(1)
|
||||
|
||||
# Twisted and canonicaljson will fail to import when this file is executed to
|
||||
@@ -47,7 +47,7 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
__version__ = "1.32.0rc1"
|
||||
__version__ = "1.34.0rc1"
|
||||
|
||||
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
|
||||
# We import here so that we don't have to install a bunch of deps when
|
||||
|
||||
@@ -12,14 +12,13 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from typing import List, Optional, Tuple
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
|
||||
|
||||
import pymacaroons
|
||||
from netaddr import IPAddress
|
||||
|
||||
from twisted.web.server import Request
|
||||
|
||||
import synapse.types
|
||||
from synapse import event_auth
|
||||
from synapse.api.auth_blocking import AuthBlocking
|
||||
from synapse.api.constants import EventTypes, HistoryVisibility, Membership
|
||||
@@ -36,11 +35,14 @@ from synapse.http import get_request_user_agent
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging import opentracing as opentracing
|
||||
from synapse.storage.databases.main.registration import TokenLookupResult
|
||||
from synapse.types import StateMap, UserID
|
||||
from synapse.types import Requester, StateMap, UserID, create_requester
|
||||
from synapse.util.caches.lrucache import LruCache
|
||||
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -65,9 +67,10 @@ class Auth:
|
||||
"""
|
||||
FIXME: This class contains a mix of functions for authenticating users
|
||||
of our client-server API and authenticating events added to room graphs.
|
||||
The latter should be moved to synapse.handlers.event_auth.EventAuthHandler.
|
||||
"""
|
||||
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.hs = hs
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastore()
|
||||
@@ -79,19 +82,21 @@ class Auth:
|
||||
|
||||
self._auth_blocking = AuthBlocking(self.hs)
|
||||
|
||||
self._account_validity = hs.config.account_validity
|
||||
self._account_validity_enabled = (
|
||||
hs.config.account_validity.account_validity_enabled
|
||||
)
|
||||
self._track_appservice_user_ips = hs.config.track_appservice_user_ips
|
||||
self._macaroon_secret_key = hs.config.macaroon_secret_key
|
||||
|
||||
async def check_from_context(
|
||||
self, room_version: str, event, context, do_sig_check=True
|
||||
):
|
||||
) -> None:
|
||||
prev_state_ids = await context.get_prev_state_ids()
|
||||
auth_events_ids = self.compute_auth_events(
|
||||
event, prev_state_ids, for_verification=True
|
||||
)
|
||||
auth_events = await self.store.get_events(auth_events_ids)
|
||||
auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
|
||||
auth_events_by_id = await self.store.get_events(auth_events_ids)
|
||||
auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
|
||||
|
||||
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
|
||||
event_auth.check(
|
||||
@@ -148,17 +153,11 @@ class Auth:
|
||||
|
||||
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
||||
|
||||
async def check_host_in_room(self, room_id, host):
|
||||
async def check_host_in_room(self, room_id: str, host: str) -> bool:
|
||||
with Measure(self.clock, "check_host_in_room"):
|
||||
latest_event_ids = await self.store.is_host_joined(room_id, host)
|
||||
return latest_event_ids
|
||||
return await self.store.is_host_joined(room_id, host)
|
||||
|
||||
def can_federate(self, event, auth_events):
|
||||
creation_event = auth_events.get((EventTypes.Create, ""))
|
||||
|
||||
return creation_event.content.get("m.federate", True) is True
|
||||
|
||||
def get_public_keys(self, invite_event):
|
||||
def get_public_keys(self, invite_event: EventBase) -> List[Dict[str, Any]]:
|
||||
return event_auth.get_public_keys(invite_event)
|
||||
|
||||
async def get_user_by_req(
|
||||
@@ -167,7 +166,7 @@ class Auth:
|
||||
allow_guest: bool = False,
|
||||
rights: str = "access",
|
||||
allow_expired: bool = False,
|
||||
) -> synapse.types.Requester:
|
||||
) -> Requester:
|
||||
"""Get a registered user's ID.
|
||||
|
||||
Args:
|
||||
@@ -193,7 +192,7 @@ class Auth:
|
||||
access_token = self.get_access_token_from_request(request)
|
||||
|
||||
user_id, app_service = await self._get_appservice_user_id(request)
|
||||
if user_id:
|
||||
if user_id and app_service:
|
||||
if ip_addr and self._track_appservice_user_ips:
|
||||
await self.store.insert_client_ip(
|
||||
user_id=user_id,
|
||||
@@ -203,9 +202,7 @@ class Auth:
|
||||
device_id="dummy-device", # stubbed
|
||||
)
|
||||
|
||||
requester = synapse.types.create_requester(
|
||||
user_id, app_service=app_service
|
||||
)
|
||||
requester = create_requester(user_id, app_service=app_service)
|
||||
|
||||
request.requester = user_id
|
||||
opentracing.set_tag("authenticated_entity", user_id)
|
||||
@@ -222,7 +219,7 @@ class Auth:
|
||||
shadow_banned = user_info.shadow_banned
|
||||
|
||||
# Deny the request if the user account has expired.
|
||||
if self._account_validity.enabled and not allow_expired:
|
||||
if self._account_validity_enabled and not allow_expired:
|
||||
if await self.store.is_account_expired(
|
||||
user_info.user_id, self.clock.time_msec()
|
||||
):
|
||||
@@ -248,7 +245,7 @@ class Auth:
|
||||
errcode=Codes.GUEST_ACCESS_FORBIDDEN,
|
||||
)
|
||||
|
||||
requester = synapse.types.create_requester(
|
||||
requester = create_requester(
|
||||
user_info.user_id,
|
||||
token_id,
|
||||
is_guest,
|
||||
@@ -268,7 +265,9 @@ class Auth:
|
||||
except KeyError:
|
||||
raise MissingClientTokenError()
|
||||
|
||||
async def _get_appservice_user_id(self, request):
|
||||
async def _get_appservice_user_id(
|
||||
self, request: Request
|
||||
) -> Tuple[Optional[str], Optional[ApplicationService]]:
|
||||
app_service = self.store.get_app_service_by_token(
|
||||
self.get_access_token_from_request(request)
|
||||
)
|
||||
@@ -280,6 +279,9 @@ class Auth:
|
||||
if ip_address not in app_service.ip_range_whitelist:
|
||||
return None, None
|
||||
|
||||
# This will always be set by the time Twisted calls us.
|
||||
assert request.args is not None
|
||||
|
||||
if b"user_id" not in request.args:
|
||||
return app_service.sender, app_service
|
||||
|
||||
@@ -384,7 +386,9 @@ class Auth:
|
||||
logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
|
||||
raise InvalidClientTokenError("Invalid macaroon passed.")
|
||||
|
||||
def _parse_and_validate_macaroon(self, token, rights="access"):
|
||||
def _parse_and_validate_macaroon(
|
||||
self, token: str, rights: str = "access"
|
||||
) -> Tuple[str, bool]:
|
||||
"""Takes a macaroon and tries to parse and validate it. This is cached
|
||||
if and only if rights == access and there isn't an expiry.
|
||||
|
||||
@@ -429,15 +433,16 @@ class Auth:
|
||||
|
||||
return user_id, guest
|
||||
|
||||
def validate_macaroon(self, macaroon, type_string, user_id):
|
||||
def validate_macaroon(
|
||||
self, macaroon: pymacaroons.Macaroon, type_string: str, user_id: str
|
||||
) -> None:
|
||||
"""
|
||||
validate that a Macaroon is understood by and was signed by this server.
|
||||
|
||||
Args:
|
||||
macaroon(pymacaroons.Macaroon): The macaroon to validate
|
||||
type_string(str): The kind of token required (e.g. "access",
|
||||
"delete_pusher")
|
||||
user_id (str): The user_id required
|
||||
macaroon: The macaroon to validate
|
||||
type_string: The kind of token required (e.g. "access", "delete_pusher")
|
||||
user_id: The user_id required
|
||||
"""
|
||||
v = pymacaroons.Verifier()
|
||||
|
||||
@@ -462,9 +467,7 @@ class Auth:
|
||||
if not service:
|
||||
logger.warning("Unrecognised appservice access token.")
|
||||
raise InvalidClientTokenError()
|
||||
request.requester = synapse.types.create_requester(
|
||||
service.sender, app_service=service
|
||||
)
|
||||
request.requester = create_requester(service.sender, app_service=service)
|
||||
return service
|
||||
|
||||
async def is_server_admin(self, user: UserID) -> bool:
|
||||
@@ -516,7 +519,7 @@ class Auth:
|
||||
|
||||
return auth_ids
|
||||
|
||||
async def check_can_change_room_list(self, room_id: str, user: UserID):
|
||||
async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool:
|
||||
"""Determine whether the user is allowed to edit the room's entry in the
|
||||
published room list.
|
||||
|
||||
@@ -551,11 +554,11 @@ class Auth:
|
||||
return user_level >= send_level
|
||||
|
||||
@staticmethod
|
||||
def has_access_token(request: Request):
|
||||
def has_access_token(request: Request) -> bool:
|
||||
"""Checks if the request has an access_token.
|
||||
|
||||
Returns:
|
||||
bool: False if no access_token was given, True otherwise.
|
||||
False if no access_token was given, True otherwise.
|
||||
"""
|
||||
# This will always be set by the time Twisted calls us.
|
||||
assert request.args is not None
|
||||
@@ -565,13 +568,13 @@ class Auth:
|
||||
return bool(query_params) or bool(auth_headers)
|
||||
|
||||
@staticmethod
|
||||
def get_access_token_from_request(request: Request):
|
||||
def get_access_token_from_request(request: Request) -> str:
|
||||
"""Extracts the access_token from the request.
|
||||
|
||||
Args:
|
||||
request: The http request.
|
||||
Returns:
|
||||
unicode: The access_token
|
||||
The access_token
|
||||
Raises:
|
||||
MissingClientTokenError: If there isn't a single access_token in the
|
||||
request
|
||||
@@ -646,5 +649,5 @@ class Auth:
|
||||
% (user_id, room_id),
|
||||
)
|
||||
|
||||
def check_auth_blocking(self, *args, **kwargs):
|
||||
return self._auth_blocking.check_auth_blocking(*args, **kwargs)
|
||||
async def check_auth_blocking(self, *args, **kwargs) -> None:
|
||||
await self._auth_blocking.check_auth_blocking(*args, **kwargs)
|
||||
|
||||
@@ -13,18 +13,21 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from synapse.api.constants import LimitBlockingTypes, UserTypes
|
||||
from synapse.api.errors import Codes, ResourceLimitError
|
||||
from synapse.config.server import is_threepid_reserved
|
||||
from synapse.types import Requester
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AuthBlocking:
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.store = hs.get_datastore()
|
||||
|
||||
self._server_notices_mxid = hs.config.server_notices_mxid
|
||||
@@ -43,7 +46,7 @@ class AuthBlocking:
|
||||
threepid: Optional[dict] = None,
|
||||
user_type: Optional[str] = None,
|
||||
requester: Optional[Requester] = None,
|
||||
):
|
||||
) -> None:
|
||||
"""Checks if the user should be rejected for some external reason,
|
||||
such as monthly active user limiting or global disable flag
|
||||
|
||||
|
||||
@@ -17,6 +17,9 @@
|
||||
|
||||
"""Contains constants from the specification."""
|
||||
|
||||
# the max size of a (canonical-json-encoded) event
|
||||
MAX_PDU_SIZE = 65536
|
||||
|
||||
# the "depth" field on events is limited to 2**63 - 1
|
||||
MAX_DEPTH = 2 ** 63 - 1
|
||||
|
||||
@@ -107,13 +110,18 @@ class EventTypes:
|
||||
|
||||
Dummy = "org.matrix.dummy_event"
|
||||
|
||||
SpaceChild = "m.space.child"
|
||||
SpaceParent = "m.space.parent"
|
||||
MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child"
|
||||
MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent"
|
||||
|
||||
|
||||
class ToDeviceEventTypes:
|
||||
RoomKeyRequest = "m.room_key_request"
|
||||
|
||||
|
||||
class EduTypes:
|
||||
Presence = "m.presence"
|
||||
RoomKeyRequest = "m.room_key_request"
|
||||
|
||||
|
||||
class RejectedReason:
|
||||
@@ -171,6 +179,7 @@ class EventContentFields:
|
||||
SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"
|
||||
|
||||
# cf https://github.com/matrix-org/matrix-doc/pull/1772
|
||||
ROOM_TYPE = "type"
|
||||
MSC1772_ROOM_TYPE = "org.matrix.msc1772.type"
|
||||
|
||||
|
||||
|
||||
@@ -57,6 +57,7 @@ class Ratelimiter:
|
||||
rate_hz: Optional[float] = None,
|
||||
burst_count: Optional[int] = None,
|
||||
update: bool = True,
|
||||
n_actions: int = 1,
|
||||
_time_now_s: Optional[int] = None,
|
||||
) -> Tuple[bool, float]:
|
||||
"""Can the entity (e.g. user or IP address) perform the action?
|
||||
@@ -76,6 +77,9 @@ class Ratelimiter:
|
||||
burst_count: How many actions that can be performed before being limited.
|
||||
Overrides the value set during instantiation if set.
|
||||
update: Whether to count this check as performing the action
|
||||
n_actions: The number of times the user wants to do this action. If the user
|
||||
cannot do all of the actions, the user's action count is not incremented
|
||||
at all.
|
||||
_time_now_s: The current time. Optional, defaults to the current time according
|
||||
to self.clock. Only used by tests.
|
||||
|
||||
@@ -124,17 +128,20 @@ class Ratelimiter:
|
||||
time_delta = time_now_s - time_start
|
||||
performed_count = action_count - time_delta * rate_hz
|
||||
if performed_count < 0:
|
||||
# Allow, reset back to count 1
|
||||
allowed = True
|
||||
performed_count = 0
|
||||
time_start = time_now_s
|
||||
action_count = 1.0
|
||||
elif performed_count > burst_count - 1.0:
|
||||
|
||||
# This check would be easier read as performed_count + n_actions > burst_count,
|
||||
# but performed_count might be a very precise float (with lots of numbers
|
||||
# following the point) in which case Python might round it up when adding it to
|
||||
# n_actions. Writing it this way ensures it doesn't happen.
|
||||
if performed_count > burst_count - n_actions:
|
||||
# Deny, we have exceeded our burst count
|
||||
allowed = False
|
||||
else:
|
||||
# We haven't reached our limit yet
|
||||
allowed = True
|
||||
action_count += 1.0
|
||||
action_count = performed_count + n_actions
|
||||
|
||||
if update:
|
||||
self.actions[key] = (action_count, time_start, rate_hz)
|
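
Note: the comment in this hunk explains why the new burst check is written as `performed_count > burst_count - n_actions` instead of adding `n_actions` to the (possibly very precise) float. A minimal standalone sketch of that check, with illustrative names that are not taken from the patch:

```python
# Illustrative sketch only: the rewritten burst-count comparison.
def burst_exceeded(performed_count: float, n_actions: int, burst_count: float) -> bool:
    # Keeping performed_count on its own side of the comparison avoids
    # adding it to n_actions, where float rounding could flip the result.
    return performed_count > burst_count - n_actions


# Example: with a burst_count of 5, ~4.2 actions already performed leaves
# no room for one more, while ~3.9 still allows it.
assert burst_exceeded(4.2, 1, 5.0) is True
assert burst_exceeded(3.9, 1, 5.0) is False
```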
||||
@@ -182,6 +189,7 @@ class Ratelimiter:
|
||||
rate_hz: Optional[float] = None,
|
||||
burst_count: Optional[int] = None,
|
||||
update: bool = True,
|
||||
n_actions: int = 1,
|
||||
_time_now_s: Optional[int] = None,
|
||||
):
|
||||
"""Checks if an action can be performed. If not, raises a LimitExceededError
|
||||
@@ -201,6 +209,9 @@ class Ratelimiter:
|
||||
burst_count: How many actions that can be performed before being limited.
|
||||
Overrides the value set during instantiation if set.
|
||||
update: Whether to count this check as performing the action
|
||||
n_actions: The number of times the user wants to do this action. If the user
|
||||
cannot do all of the actions, the user's action count is not incremented
|
||||
at all.
|
||||
_time_now_s: The current time. Optional, defaults to the current time according
|
||||
to self.clock. Only used by tests.
|
||||
|
||||
@@ -216,6 +227,7 @@ class Ratelimiter:
|
||||
rate_hz=rate_hz,
|
||||
burst_count=burst_count,
|
||||
update=update,
|
||||
n_actions=n_actions,
|
||||
_time_now_s=time_now_s,
|
||||
)
|
||||
|
||||
|
||||
@@ -30,12 +30,14 @@ from twisted.internet import defer, error, reactor
|
||||
from twisted.protocols.tls import TLSMemoryBIOFactory
|
||||
|
||||
import synapse
|
||||
from synapse.api.constants import MAX_PDU_SIZE
|
||||
from synapse.app import check_bind_error
|
||||
from synapse.app.phone_stats_home import start_phone_stats_home
|
||||
from synapse.config.server import ListenerConfig
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.logging.context import PreserveLoggingContext
|
||||
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
||||
from synapse.metrics.jemalloc import setup_jemalloc_stats
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.daemonize import daemonize_process
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
@@ -114,6 +116,7 @@ def start_reactor(
|
||||
|
||||
def run():
|
||||
logger.info("Running")
|
||||
setup_jemalloc_stats()
|
||||
change_resource_limit(soft_file_limit)
|
||||
if gc_thresholds:
|
||||
gc.set_threshold(*gc_thresholds)
|
||||
@@ -288,7 +291,7 @@ def refresh_certificate(hs):
|
||||
logger.info("Context factories updated.")
|
||||
|
||||
|
||||
async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):
|
||||
async def start(hs: "synapse.server.HomeServer"):
|
||||
"""
|
||||
Start a Synapse server or worker.
|
||||
|
||||
@@ -300,7 +303,6 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon
|
||||
|
||||
Args:
|
||||
hs: homeserver instance
|
||||
listeners: Listener configuration ('listeners' in homeserver.yaml)
|
||||
"""
|
||||
# Set up the SIGHUP machinery.
|
||||
if hasattr(signal, "SIGHUP"):
|
||||
@@ -336,7 +338,7 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon
|
||||
synapse.logging.opentracing.init_tracer(hs) # type: ignore[attr-defined] # noqa
|
||||
|
||||
# It is now safe to start your Synapse.
|
||||
hs.start_listening(listeners)
|
||||
hs.start_listening()
|
||||
hs.get_datastore().db_pool.start_profiling()
|
||||
hs.get_pusherpool().start()
|
||||
|
||||
@@ -530,3 +532,25 @@ def sdnotify(state):
|
||||
# this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
|
||||
# unless systemd is expecting us to notify it.
|
||||
logger.warning("Unable to send notification to systemd: %s", e)
|
||||
|
||||
|
||||
def max_request_body_size(config: HomeServerConfig) -> int:
|
||||
"""Get a suitable maximum size for incoming HTTP requests"""
|
||||
|
||||
# Other than media uploads, the biggest request we expect to see is a fully-loaded
|
||||
# /federation/v1/send request.
|
||||
#
|
||||
# The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
|
||||
# limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
|
||||
# json encoding); there is no specced limit to EDUs (see
|
||||
# https://github.com/matrix-org/matrix-doc/issues/3121).
|
||||
#
|
||||
# in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
|
||||
#
|
||||
max_request_size = 200 * MAX_PDU_SIZE
|
||||
|
||||
# if we have a media repo enabled, we may need to allow larger uploads than that
|
||||
if config.media.can_load_media_repo:
|
||||
max_request_size = max(max_request_size, config.media.max_upload_size)
|
||||
|
||||
return max_request_size
|
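
For reference, the arithmetic behind the "about 12.5M" in the comment above works out as follows (a standalone sketch, not part of the patch):

```python
MAX_PDU_SIZE = 65536          # 64 KiB per canonical-json-encoded PDU
limit = 200 * MAX_PDU_SIZE    # 13107200 bytes
print(limit / (1024 * 1024))  # -> 12.5 MiB, before any media-repo override
```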
||||
|
||||
@@ -70,12 +70,6 @@ class AdminCmdSlavedStore(
|
||||
class AdminCmdServer(HomeServer):
|
||||
DATASTORE_CLASS = AdminCmdSlavedStore
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
pass
|
||||
|
||||
def start_listening(self, listeners):
|
||||
pass
|
||||
|
||||
|
||||
async def export_data_command(hs, args):
|
||||
"""Export data for a user.
|
||||
@@ -232,7 +226,7 @@ def start(config_options):
|
||||
|
||||
async def run():
|
||||
with LoggingContext("command"):
|
||||
_base.start(ss, [])
|
||||
_base.start(ss)
|
||||
await args.func(ss, args)
|
||||
|
||||
_base.start_worker_reactor(
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import sys
|
||||
from typing import Dict, Iterable, Optional
|
||||
from typing import Dict, Optional
|
||||
|
||||
from twisted.internet import address
|
||||
from twisted.web.resource import IResource
|
||||
@@ -32,7 +32,7 @@ from synapse.api.urls import (
|
||||
SERVER_KEY_V2_PREFIX,
|
||||
)
|
||||
from synapse.app import _base
|
||||
from synapse.app._base import register_start
|
||||
from synapse.app._base import max_request_body_size, register_start
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
@@ -55,7 +55,6 @@ from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
||||
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
||||
from synapse.replication.slave.storage.profile import SlavedProfileStore
|
||||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||
@@ -64,7 +63,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
|
||||
from synapse.rest.admin import register_servlets_for_media_repo
|
||||
from synapse.rest.client.v1 import events, login, room
|
||||
from synapse.rest.client.v1 import events, login, presence, room
|
||||
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
||||
from synapse.rest.client.v1.profile import (
|
||||
ProfileAvatarURLRestServlet,
|
||||
@@ -110,6 +109,7 @@ from synapse.storage.databases.main.metrics import ServerMetricsStore
|
||||
from synapse.storage.databases.main.monthly_active_users import (
|
||||
MonthlyActiveUsersWorkerStore,
|
||||
)
|
||||
from synapse.storage.databases.main.presence import PresenceStore
|
||||
from synapse.storage.databases.main.search import SearchWorkerStore
|
||||
from synapse.storage.databases.main.stats import StatsStore
|
||||
from synapse.storage.databases.main.transactions import TransactionWorkerStore
|
||||
@@ -121,26 +121,6 @@ from synapse.util.versionstring import get_version_string
|
||||
logger = logging.getLogger("synapse.app.generic_worker")
|
||||
|
||||
|
||||
class PresenceStatusStubServlet(RestServlet):
|
||||
"""If presence is disabled this servlet can be used to stub out setting
|
||||
presence status.
|
||||
"""
|
||||
|
||||
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
|
||||
|
||||
def __init__(self, hs):
|
||||
super().__init__()
|
||||
self.auth = hs.get_auth()
|
||||
|
||||
async def on_GET(self, request, user_id):
|
||||
await self.auth.get_user_by_req(request)
|
||||
return 200, {"presence": "offline"}
|
||||
|
||||
async def on_PUT(self, request, user_id):
|
||||
await self.auth.get_user_by_req(request)
|
||||
return 200, {}
|
||||
|
||||
|
||||
class KeyUploadServlet(RestServlet):
|
||||
"""An implementation of the `KeyUploadServlet` that responds to read only
|
||||
requests, but otherwise proxies through to the master instance.
|
||||
@@ -241,6 +221,7 @@ class GenericWorkerSlavedStore(
|
||||
StatsStore,
|
||||
UIAuthWorkerStore,
|
||||
EndToEndRoomKeyStore,
|
||||
PresenceStore,
|
||||
SlavedDeviceInboxStore,
|
||||
SlavedDeviceStore,
|
||||
SlavedReceiptsStore,
|
||||
@@ -259,7 +240,6 @@ class GenericWorkerSlavedStore(
|
||||
SlavedTransactionStore,
|
||||
SlavedProfileStore,
|
||||
SlavedClientIpStore,
|
||||
SlavedPresenceStore,
|
||||
SlavedFilteringStore,
|
||||
MonthlyActiveUsersWorkerStore,
|
||||
MediaRepositoryStore,
|
||||
@@ -327,10 +307,7 @@ class GenericWorkerServer(HomeServer):
|
||||
|
||||
user_directory.register_servlets(self, resource)
|
||||
|
||||
# If presence is disabled, use the stub servlet that does
|
||||
# not allow sending presence
|
||||
if not self.config.use_presence:
|
||||
PresenceStatusStubServlet(self).register(resource)
|
||||
presence.register_servlets(self, resource)
|
||||
|
||||
groups.register_servlets(self, resource)
|
||||
|
||||
@@ -390,14 +367,16 @@ class GenericWorkerServer(HomeServer):
|
||||
listener_config,
|
||||
root_resource,
|
||||
self.version_string,
|
||||
max_request_body_size=max_request_body_size(self.config),
|
||||
reactor=self.get_reactor(),
|
||||
),
|
||||
reactor=self.get_reactor(),
|
||||
)
|
||||
|
||||
logger.info("Synapse worker now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners: Iterable[ListenerConfig]):
|
||||
for listener in listeners:
|
||||
def start_listening(self):
|
||||
for listener in self.config.worker_listeners:
|
||||
if listener.type == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener.type == "manhole":
|
||||
@@ -475,6 +454,10 @@ def start(config_options):
|
||||
config.server.update_user_directory = False
|
||||
|
||||
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
|
||||
|
||||
if config.server.gc_seconds:
|
||||
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds
|
||||
|
||||
hs = GenericWorkerServer(
|
||||
config.server_name,
|
||||
@@ -490,7 +473,7 @@ def start(config_options):
|
||||
# streams. Will no-op if no streams can be written to by this worker.
|
||||
hs.get_replication_streamer()
|
||||
|
||||
register_start(_base.start, hs, config.worker_listeners)
|
||||
register_start(_base.start, hs)
|
||||
|
||||
_base.start_worker_reactor("synapse-generic-worker", config)
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from typing import Iterable, Iterator
|
||||
from typing import Iterator
|
||||
|
||||
from twisted.internet import reactor
|
||||
from twisted.web.resource import EncodingResourceWrapper, IResource
|
||||
@@ -36,7 +36,13 @@ from synapse.api.urls import (
|
||||
WEB_CLIENT_PREFIX,
|
||||
)
|
||||
from synapse.app import _base
|
||||
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error, register_start
|
||||
from synapse.app._base import (
|
||||
listen_ssl,
|
||||
listen_tcp,
|
||||
max_request_body_size,
|
||||
quit_with_error,
|
||||
register_start,
|
||||
)
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.emailconfig import ThreepidBehaviour
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
@@ -126,19 +132,21 @@ class SynapseHomeServer(HomeServer):
|
||||
else:
|
||||
root_resource = OptionsResource()
|
||||
|
||||
root_resource = create_resource_tree(resources, root_resource)
|
||||
site = SynapseSite(
|
||||
"synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
|
||||
site_tag,
|
||||
listener_config,
|
||||
create_resource_tree(resources, root_resource),
|
||||
self.version_string,
|
||||
max_request_body_size=max_request_body_size(self.config),
|
||||
reactor=self.get_reactor(),
|
||||
)
|
||||
|
||||
if tls:
|
||||
ports = listen_ssl(
|
||||
bind_addresses,
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.https.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
self.version_string,
|
||||
),
|
||||
site,
|
||||
self.tls_server_context_factory,
|
||||
reactor=self.get_reactor(),
|
||||
)
|
||||
@@ -148,13 +156,7 @@ class SynapseHomeServer(HomeServer):
|
||||
ports = listen_tcp(
|
||||
bind_addresses,
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
self.version_string,
|
||||
),
|
||||
site,
|
||||
reactor=self.get_reactor(),
|
||||
)
|
||||
logger.info("Synapse now listening on TCP port %d", port)
|
||||
@@ -273,14 +275,14 @@ class SynapseHomeServer(HomeServer):
|
||||
|
||||
return resources
|
||||
|
||||
def start_listening(self, listeners: Iterable[ListenerConfig]):
|
||||
def start_listening(self):
|
||||
if self.config.redis_enabled:
|
||||
# If redis is enabled we connect via the replication command handler
|
||||
# in the same way as the workers (since we're effectively a client
|
||||
# rather than a server).
|
||||
self.get_tcp_replication().start_replication(self)
|
||||
|
||||
for listener in listeners:
|
||||
for listener in self.config.server.listeners:
|
||||
if listener.type == "http":
|
||||
self._listening_services.extend(
|
||||
self._listener_http(self.config, listener)
|
||||
@@ -339,6 +341,10 @@ def setup(config_options):
|
||||
sys.exit(0)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
|
||||
|
||||
if config.server.gc_seconds:
|
||||
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds
|
||||
|
||||
hs = SynapseHomeServer(
|
||||
config.server_name,
|
||||
@@ -412,7 +418,7 @@ def setup(config_options):
|
||||
# Loading the provider metadata also ensures the provider config is valid.
|
||||
await oidc.load_metadata()
|
||||
|
||||
await _base.start(hs, config.listeners)
|
||||
await _base.start(hs)
|
||||
|
||||
hs.get_datastore().db_pool.updates.start_doing_background_updates()
|
||||
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
from typing import Any, Iterable, List, Optional
|
||||
|
||||
from synapse.config import (
|
||||
account_validity,
|
||||
api,
|
||||
appservice,
|
||||
auth,
|
||||
captcha,
|
||||
cas,
|
||||
consent_config,
|
||||
consent,
|
||||
database,
|
||||
emailconfig,
|
||||
experimental,
|
||||
groups,
|
||||
jwt_config,
|
||||
jwt,
|
||||
key,
|
||||
logger,
|
||||
metrics,
|
||||
oidc_config,
|
||||
oidc,
|
||||
password_auth_providers,
|
||||
push,
|
||||
ratelimiting,
|
||||
@@ -23,9 +24,9 @@ from synapse.config import (
|
||||
registration,
|
||||
repository,
|
||||
room_directory,
|
||||
saml2_config,
|
||||
saml2,
|
||||
server,
|
||||
server_notices_config,
|
||||
server_notices,
|
||||
spam_checker,
|
||||
sso,
|
||||
stats,
|
||||
@@ -59,15 +60,16 @@ class RootConfig:
|
||||
captcha: captcha.CaptchaConfig
|
||||
voip: voip.VoipConfig
|
||||
registration: registration.RegistrationConfig
|
||||
account_validity: account_validity.AccountValidityConfig
|
||||
metrics: metrics.MetricsConfig
|
||||
api: api.ApiConfig
|
||||
appservice: appservice.AppServiceConfig
|
||||
key: key.KeyConfig
|
||||
saml2: saml2_config.SAML2Config
|
||||
saml2: saml2.SAML2Config
|
||||
cas: cas.CasConfig
|
||||
sso: sso.SSOConfig
|
||||
oidc: oidc_config.OIDCConfig
|
||||
jwt: jwt_config.JWTConfig
|
||||
oidc: oidc.OIDCConfig
|
||||
jwt: jwt.JWTConfig
|
||||
auth: auth.AuthConfig
|
||||
email: emailconfig.EmailConfig
|
||||
worker: workers.WorkerConfig
|
||||
@@ -76,9 +78,9 @@ class RootConfig:
|
||||
spamchecker: spam_checker.SpamCheckerConfig
|
||||
groups: groups.GroupsConfig
|
||||
userdirectory: user_directory.UserDirectoryConfig
|
||||
consent: consent_config.ConsentConfig
|
||||
consent: consent.ConsentConfig
|
||||
stats: stats.StatsConfig
|
||||
servernotices: server_notices_config.ServerNoticesConfig
|
||||
servernotices: server_notices.ServerNoticesConfig
|
||||
roomdirectory: room_directory.RoomDirectoryConfig
|
||||
thirdpartyrules: third_party_event_rules.ThirdPartyRulesConfig
|
||||
tracer: tracer.TracerConfig
|
||||
|
||||
synapse/config/account_validity.py (new file, 165 lines)
@@ -0,0 +1,165 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from synapse.config._base import Config, ConfigError
|
||||
|
||||
|
||||
class AccountValidityConfig(Config):
|
||||
section = "account_validity"
|
||||
|
||||
def read_config(self, config, **kwargs):
|
||||
account_validity_config = config.get("account_validity") or {}
|
||||
self.account_validity_enabled = account_validity_config.get("enabled", False)
|
||||
self.account_validity_renew_by_email_enabled = (
|
||||
"renew_at" in account_validity_config
|
||||
)
|
||||
|
||||
if self.account_validity_enabled:
|
||||
if "period" in account_validity_config:
|
||||
self.account_validity_period = self.parse_duration(
|
||||
account_validity_config["period"]
|
||||
)
|
||||
else:
|
||||
raise ConfigError("'period' is required when using account validity")
|
||||
|
||||
if "renew_at" in account_validity_config:
|
||||
self.account_validity_renew_at = self.parse_duration(
|
||||
account_validity_config["renew_at"]
|
||||
)
|
||||
|
||||
if "renew_email_subject" in account_validity_config:
|
||||
self.account_validity_renew_email_subject = account_validity_config[
|
||||
"renew_email_subject"
|
||||
]
|
||||
else:
|
||||
self.account_validity_renew_email_subject = "Renew your %(app)s account"
|
||||
|
||||
self.account_validity_startup_job_max_delta = (
|
||||
self.account_validity_period * 10.0 / 100.0
|
||||
)
|
||||
|
||||
if self.account_validity_renew_by_email_enabled:
|
||||
if not self.public_baseurl:
|
||||
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
|
||||
|
||||
# Load account validity templates.
|
||||
account_validity_template_dir = account_validity_config.get("template_dir")
|
||||
|
||||
account_renewed_template_filename = account_validity_config.get(
|
||||
"account_renewed_html_path", "account_renewed.html"
|
||||
)
|
||||
invalid_token_template_filename = account_validity_config.get(
|
||||
"invalid_token_html_path", "invalid_token.html"
|
||||
)
|
||||
|
||||
# Read and store template content
|
||||
(
|
||||
self.account_validity_account_renewed_template,
|
||||
self.account_validity_account_previously_renewed_template,
|
||||
self.account_validity_invalid_token_template,
|
||||
) = self.read_templates(
|
||||
[
|
||||
account_renewed_template_filename,
|
||||
"account_previously_renewed.html",
|
||||
invalid_token_template_filename,
|
||||
],
|
||||
account_validity_template_dir,
|
||||
)
|
||||
|
||||
def generate_config_section(self, **kwargs):
|
||||
return """\
|
||||
## Account Validity ##
|
||||
|
||||
# Optional account validity configuration. This allows for accounts to be denied
|
||||
# any request after a given period.
|
||||
#
|
||||
# Once this feature is enabled, Synapse will look for registered users without an
|
||||
# expiration date at startup and will add one to every account it found using the
|
||||
# current settings at that time.
|
||||
# This means that, if a validity period is set, and Synapse is restarted (it will
|
||||
# then derive an expiration date from the current validity period), and some time
|
||||
# after that the validity period changes and Synapse is restarted, the users'
|
||||
# expiration dates won't be updated unless their account is manually renewed. This
|
||||
# date will be randomly selected within a range [now + period - d ; now + period],
|
||||
# where d is equal to 10% of the validity period.
|
||||
#
|
||||
account_validity:
|
||||
# The account validity feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# The period after which an account is valid after its registration. When
|
||||
# renewing the account, its validity period will be extended by this amount
|
||||
# of time. This parameter is required when using the account validity
|
||||
# feature.
|
||||
#
|
||||
#period: 6w
|
||||
|
||||
# The amount of time before an account's expiry date at which Synapse will
|
||||
# send an email to the account's email address with a renewal link. By
|
||||
# default, no such emails are sent.
|
||||
#
|
||||
# If you enable this setting, you will also need to fill out the 'email' and
|
||||
# 'public_baseurl' configuration sections.
|
||||
#
|
||||
#renew_at: 1w
|
||||
|
||||
# The subject of the email sent out with the renewal link. '%(app)s' can be
|
||||
# used as a placeholder for the 'app_name' parameter from the 'email'
|
||||
# section.
|
||||
#
|
||||
# Note that the placeholder must be written '%(app)s', including the
|
||||
# trailing 's'.
|
||||
#
|
||||
# If this is not set, a default value is used.
|
||||
#
|
||||
#renew_email_subject: "Renew your %(app)s account"
|
||||
|
||||
# Directory in which Synapse will try to find templates for the HTML files to
|
||||
# serve to the user when trying to renew an account. If not set, default
|
||||
# templates from within the Synapse package will be used.
|
||||
#
|
||||
# The currently available templates are:
|
||||
#
|
||||
# * account_renewed.html: Displayed to the user after they have successfully
|
||||
# renewed their account.
|
||||
#
|
||||
# * account_previously_renewed.html: Displayed to the user if they attempt to
|
||||
# renew their account with a token that is valid, but that has already
|
||||
# been used. In this case the account is not renewed again.
|
||||
#
|
||||
# * invalid_token.html: Displayed to the user when they try to renew an account
|
||||
# with an unknown or invalid renewal token.
|
||||
#
|
||||
# See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for
|
||||
# default template contents.
|
||||
#
|
||||
# The file name of some of these templates can be configured below for legacy
|
||||
# reasons.
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
# A custom file name for the 'account_renewed.html' template.
|
||||
#
|
||||
# If not set, the file is assumed to be named "account_renewed.html".
|
||||
#
|
||||
#account_renewed_html_path: "account_renewed.html"
|
||||
|
||||
# A custom file name for the 'invalid_token.html' template.
|
||||
#
|
||||
# If not set, the file is assumed to be named "invalid_token.html".
|
||||
#
|
||||
#invalid_token_html_path: "invalid_token.html"
|
||||
"""
|
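
The generated config above notes that, at startup, each account missing an expiration date gets one picked randomly within [now + period - d ; now + period], where d is 10% of the validity period (the `account_validity_startup_job_max_delta` computed in `read_config`). A rough sketch of that selection, using hypothetical helper names rather than Synapse APIs:

```python
import random
import time

def pick_expiration_ts(now_ms: int, period_ms: int) -> int:
    # "d" is 10% of the validity period, mirroring
    # account_validity_startup_job_max_delta above.
    max_delta_ms = period_ms * 10.0 / 100.0
    # The expiration date lands somewhere in [now + period - d, now + period].
    return int(now_ms + period_ms - random.random() * max_delta_ms)

# Example: a 6w period yields expiry dates between roughly 5.4 and 6 weeks away.
six_weeks_ms = 6 * 7 * 24 * 3600 * 1000
print(pick_expiration_ts(int(time.time() * 1000), six_weeks_ms))
```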
||||
@@ -88,10 +88,6 @@ class ApiConfig(Config):
|
||||
if not room_prejoin_state_config.get("disable_default_event_types"):
|
||||
yield from _DEFAULT_PREJOIN_STATE_TYPES
|
||||
|
||||
if self.spaces_enabled:
|
||||
# MSC1772 suggests adding m.room.create to the prejoin state
|
||||
yield EventTypes.Create
|
||||
|
||||
yield from room_prejoin_state_config.get("additional_event_types", [])
|
||||
|
||||
|
||||
@@ -109,6 +105,8 @@ _DEFAULT_PREJOIN_STATE_TYPES = [
|
||||
EventTypes.RoomAvatar,
|
||||
EventTypes.RoomEncryption,
|
||||
EventTypes.Name,
|
||||
# Per MSC1772.
|
||||
EventTypes.Create,
|
||||
]
|
||||
|
||||
|
||||
|
||||
@@ -17,6 +17,8 @@ import re
|
||||
import threading
|
||||
from typing import Callable, Dict
|
||||
|
||||
from synapse.python_dependencies import DependencyException, check_requirements
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
# The prefix for all cache factor-related environment variables
|
||||
@@ -189,6 +191,15 @@ class CacheConfig(Config):
|
||||
)
|
||||
self.cache_factors[cache] = factor
|
||||
|
||||
self.track_memory_usage = cache_config.get("track_memory_usage", False)
|
||||
if self.track_memory_usage:
|
||||
try:
|
||||
check_requirements("cache_memory")
|
||||
except DependencyException as e:
|
||||
raise ConfigError(
|
||||
e.message # noqa: B306, DependencyException.message is a property
|
||||
)
|
||||
|
||||
# Resize all caches (if necessary) with the new factors we've loaded
|
||||
self.resize_all_caches()
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ DEFAULT_CONFIG = """\
|
||||
# password: secretpassword
|
||||
# database: synapse
|
||||
# host: localhost
|
||||
# port: 5432
|
||||
# cp_min: 5
|
||||
# cp_max: 10
|
||||
#
|
||||
|
||||
@@ -299,7 +299,7 @@ class EmailConfig(Config):
|
||||
"client_base_url", email_config.get("riot_base_url", None)
|
||||
)
|
||||
|
||||
if self.account_validity.renew_by_email_enabled:
|
||||
if self.account_validity_renew_by_email_enabled:
|
||||
expiry_template_html = email_config.get(
|
||||
"expiry_template_html", "notice_expiry.html"
|
||||
)
|
||||
|
||||
@@ -44,6 +44,10 @@ class FederationConfig(Config):
|
||||
"allow_profile_lookup_over_federation", True
|
||||
)
|
||||
|
||||
self.allow_device_name_lookup_over_federation = config.get(
|
||||
"allow_device_name_lookup_over_federation", True
|
||||
)
|
||||
|
||||
def generate_config_section(self, config_dir_path, server_name, **kwargs):
|
||||
return """\
|
||||
## Federation ##
|
||||
@@ -75,6 +79,12 @@ class FederationConfig(Config):
|
||||
# on this homeserver. Defaults to 'true'.
|
||||
#
|
||||
#allow_profile_lookup_over_federation: false
|
||||
|
||||
# Uncomment to disable device display name lookup over federation. By default, the
|
||||
# Federation API allows other homeservers to obtain device display names of any user
|
||||
# on this homeserver. Defaults to 'true'.
|
||||
#
|
||||
#allow_device_name_lookup_over_federation: false
|
||||
"""
|
||||
|
||||
|
||||
|
||||
@@ -12,25 +12,25 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import RootConfig
|
||||
from .account_validity import AccountValidityConfig
|
||||
from .api import ApiConfig
|
||||
from .appservice import AppServiceConfig
|
||||
from .auth import AuthConfig
|
||||
from .cache import CacheConfig
|
||||
from .captcha import CaptchaConfig
|
||||
from .cas import CasConfig
|
||||
from .consent_config import ConsentConfig
|
||||
from .consent import ConsentConfig
|
||||
from .database import DatabaseConfig
|
||||
from .emailconfig import EmailConfig
|
||||
from .experimental import ExperimentalConfig
|
||||
from .federation import FederationConfig
|
||||
from .groups import GroupsConfig
|
||||
from .jwt_config import JWTConfig
|
||||
from .jwt import JWTConfig
|
||||
from .key import KeyConfig
|
||||
from .logger import LoggingConfig
|
||||
from .metrics import MetricsConfig
|
||||
from .oidc_config import OIDCConfig
|
||||
from .oidc import OIDCConfig
|
||||
from .password_auth_providers import PasswordAuthProviderConfig
|
||||
from .push import PushConfig
|
||||
from .ratelimiting import RatelimitConfig
|
||||
@@ -39,9 +39,9 @@ from .registration import RegistrationConfig
|
||||
from .repository import ContentRepositoryConfig
|
||||
from .room import RoomConfig
|
||||
from .room_directory import RoomDirectoryConfig
|
||||
from .saml2_config import SAML2Config
|
||||
from .saml2 import SAML2Config
|
||||
from .server import ServerConfig
|
||||
from .server_notices_config import ServerNoticesConfig
|
||||
from .server_notices import ServerNoticesConfig
|
||||
from .spam_checker import SpamCheckerConfig
|
||||
from .sso import SSOConfig
|
||||
from .stats import StatsConfig
|
||||
@@ -68,6 +68,7 @@ class HomeServerConfig(RootConfig):
|
||||
CaptchaConfig,
|
||||
VoipConfig,
|
||||
RegistrationConfig,
|
||||
AccountValidityConfig,
|
||||
MetricsConfig,
|
||||
ApiConfig,
|
||||
AppServiceConfig,
|
||||
|
||||
@@ -31,7 +31,6 @@ from twisted.logger import (
|
||||
)
|
||||
|
||||
import synapse
|
||||
from synapse.app import _base as appbase
|
||||
from synapse.logging._structured import setup_structured_logging
|
||||
from synapse.logging.context import LoggingContextFilter
|
||||
from synapse.logging.filter import MetadataFilter
|
||||
@@ -318,6 +317,8 @@ def setup_logging(
|
||||
# Perform one-time logging configuration.
|
||||
_setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner)
|
||||
# Add a SIGHUP handler to reload the logging configuration, if one is available.
|
||||
from synapse.app import _base as appbase
|
||||
|
||||
appbase.register_sighup(_reload_logging_config, log_config_path)
|
||||
|
||||
# Log immediately so we can grep backwards.
|
||||
|
||||
@@ -14,20 +14,23 @@
|
||||
# limitations under the License.
|
||||
|
||||
from collections import Counter
|
||||
from typing import Iterable, List, Mapping, Optional, Tuple, Type
|
||||
from typing import Collection, Iterable, List, Mapping, Optional, Tuple, Type
|
||||
|
||||
import attr
|
||||
|
||||
from synapse.config._util import validate_config
|
||||
from synapse.config.sso import SsoAttributeRequirement
|
||||
from synapse.python_dependencies import DependencyException, check_requirements
|
||||
from synapse.types import Collection, JsonDict
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.module_loader import load_module
|
||||
from synapse.util.stringutils import parse_and_validate_mxc_uri
|
||||
|
||||
from ._base import Config, ConfigError, read_file
|
||||
|
||||
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"
|
||||
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
|
||||
# The module that JinjaOidcMappingProvider is in was renamed, we want to
|
||||
# transparently handle both the same.
|
||||
LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"
|
||||
|
||||
|
||||
class OIDCConfig(Config):
|
||||
@@ -403,6 +406,8 @@ def _parse_oidc_config_dict(
|
||||
"""
|
||||
ump_config = oidc_config.get("user_mapping_provider", {})
|
||||
ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
|
||||
if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER:
|
||||
ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER
|
||||
ump_config.setdefault("config", {})
|
||||
|
||||
(
|
||||
@@ -12,74 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import pkg_resources
|
||||
|
||||
from synapse.api.constants import RoomCreationPreset
|
||||
from synapse.config._base import Config, ConfigError
|
||||
from synapse.types import RoomAlias, UserID
|
||||
from synapse.util.stringutils import random_string_with_symbols, strtobool
|
||||
|
||||
|
||||
class AccountValidityConfig(Config):
|
||||
section = "accountvalidity"
|
||||
|
||||
def __init__(self, config, synapse_config):
|
||||
if config is None:
|
||||
return
|
||||
super().__init__()
|
||||
self.enabled = config.get("enabled", False)
|
||||
self.renew_by_email_enabled = "renew_at" in config
|
||||
|
||||
if self.enabled:
|
||||
if "period" in config:
|
||||
self.period = self.parse_duration(config["period"])
|
||||
else:
|
||||
raise ConfigError("'period' is required when using account validity")
|
||||
|
||||
if "renew_at" in config:
|
||||
self.renew_at = self.parse_duration(config["renew_at"])
|
||||
|
||||
if "renew_email_subject" in config:
|
||||
self.renew_email_subject = config["renew_email_subject"]
|
||||
else:
|
||||
self.renew_email_subject = "Renew your %(app)s account"
|
||||
|
||||
self.startup_job_max_delta = self.period * 10.0 / 100.0
|
||||
|
||||
if self.renew_by_email_enabled:
|
||||
if "public_baseurl" not in synapse_config:
|
||||
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
|
||||
|
||||
template_dir = config.get("template_dir")
|
||||
|
||||
if not template_dir:
|
||||
template_dir = pkg_resources.resource_filename("synapse", "res/templates")
|
||||
|
||||
if "account_renewed_html_path" in config:
|
||||
file_path = os.path.join(template_dir, config["account_renewed_html_path"])
|
||||
|
||||
self.account_renewed_html_content = self.read_file(
|
||||
file_path, "account_validity.account_renewed_html_path"
|
||||
)
|
||||
else:
|
||||
self.account_renewed_html_content = (
|
||||
"<html><body>Your account has been successfully renewed.</body><html>"
|
||||
)
|
||||
|
||||
if "invalid_token_html_path" in config:
|
||||
file_path = os.path.join(template_dir, config["invalid_token_html_path"])
|
||||
|
||||
self.invalid_token_html_content = self.read_file(
|
||||
file_path, "account_validity.invalid_token_html_path"
|
||||
)
|
||||
else:
|
||||
self.invalid_token_html_content = (
|
||||
"<html><body>Invalid renewal token.</body><html>"
|
||||
)
|
||||
|
||||
|
||||
class RegistrationConfig(Config):
|
||||
section = "registration"
|
||||
|
||||
@@ -92,10 +30,6 @@ class RegistrationConfig(Config):
|
||||
str(config["disable_registration"])
|
||||
)
|
||||
|
||||
self.account_validity = AccountValidityConfig(
|
||||
config.get("account_validity") or {}, config
|
||||
)
|
||||
|
||||
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
|
||||
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
|
||||
self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
|
||||
@@ -207,69 +141,6 @@ class RegistrationConfig(Config):
|
||||
#
|
||||
#enable_registration: false
|
||||
|
||||
# Optional account validity configuration. This allows for accounts to be denied
|
||||
# any request after a given period.
|
||||
#
|
||||
# Once this feature is enabled, Synapse will look for registered users without an
|
||||
# expiration date at startup and will add one to every account it found using the
|
||||
# current settings at that time.
|
||||
# This means that, if a validity period is set, and Synapse is restarted (it will
|
||||
# then derive an expiration date from the current validity period), and some time
|
||||
# after that the validity period changes and Synapse is restarted, the users'
|
||||
# expiration dates won't be updated unless their account is manually renewed. This
|
||||
# date will be randomly selected within a range [now + period - d ; now + period],
|
||||
# where d is equal to 10%% of the validity period.
|
||||
#
|
||||
account_validity:
|
||||
# The account validity feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# The period after which an account is valid after its registration. When
|
||||
# renewing the account, its validity period will be extended by this amount
|
||||
# of time. This parameter is required when using the account validity
|
||||
# feature.
|
||||
#
|
||||
#period: 6w
|
||||
|
||||
# The amount of time before an account's expiry date at which Synapse will
|
||||
# send an email to the account's email address with a renewal link. By
|
||||
# default, no such emails are sent.
|
||||
#
|
||||
# If you enable this setting, you will also need to fill out the 'email' and
|
||||
# 'public_baseurl' configuration sections.
|
||||
#
|
||||
#renew_at: 1w
|
||||
|
||||
# The subject of the email sent out with the renewal link. '%%(app)s' can be
|
||||
# used as a placeholder for the 'app_name' parameter from the 'email'
|
||||
# section.
|
||||
#
|
||||
# Note that the placeholder must be written '%%(app)s', including the
|
||||
# trailing 's'.
|
||||
#
|
||||
# If this is not set, a default value is used.
|
||||
#
|
||||
#renew_email_subject: "Renew your %%(app)s account"
|
||||
|
||||
# Directory in which Synapse will try to find templates for the HTML files to
|
||||
# serve to the user when trying to renew an account. If not set, default
|
||||
# templates from within the Synapse package will be used.
|
||||
#
|
||||
#template_dir: "res/templates"
|
||||
|
||||
# File within 'template_dir' giving the HTML to be displayed to the user after
|
||||
# they successfully renewed their account. If not set, default text is used.
|
||||
#
|
||||
#account_renewed_html_path: "account_renewed.html"
|
||||
|
||||
# File within 'template_dir' giving the HTML to be displayed when the user
|
||||
# tries to renew an account with an invalid renewal token. If not set,
|
||||
# default text is used.
|
||||
#
|
||||
#invalid_token_html_path: "invalid_token.html"
|
||||
|
||||
# Time that a user's session remains valid for, after they log in.
|
||||
#
|
||||
# Note that this is not currently compatible with guest logins.
|
||||
|
||||
@@ -25,7 +25,10 @@ from ._util import validate_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_USER_MAPPING_PROVIDER = (
|
||||
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.saml.DefaultSamlMappingProvider"
|
||||
# The module that DefaultSamlMappingProvider is in was renamed, we want to
|
||||
# transparently handle both the same.
|
||||
LEGACY_USER_MAPPING_PROVIDER = (
|
||||
"synapse.handlers.saml_handler.DefaultSamlMappingProvider"
|
||||
)
|
||||
|
||||
@@ -97,6 +100,8 @@ class SAML2Config(Config):
|
||||
|
||||
# Use the default user mapping provider if not set
|
||||
ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
|
||||
if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER:
|
||||
ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER
|
||||
|
||||
# Ensure a config is present
|
||||
ump_dict["config"] = ump_dict.get("config") or {}
|
||||
@@ -19,7 +19,7 @@ import logging
|
||||
import os.path
|
||||
import re
|
||||
from textwrap import indent
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
import attr
|
||||
import yaml
|
||||
@@ -235,7 +235,11 @@ class ServerConfig(Config):
|
||||
self.print_pidfile = config.get("print_pidfile")
|
||||
self.user_agent_suffix = config.get("user_agent_suffix")
|
||||
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
|
||||
|
||||
self.public_baseurl = config.get("public_baseurl")
|
||||
if self.public_baseurl is not None:
|
||||
if self.public_baseurl[-1] != "/":
|
||||
self.public_baseurl += "/"
|
||||
|
||||
# Whether to enable user presence.
|
||||
presence_config = config.get("presence") or {}
|
||||
@@ -407,10 +411,6 @@ class ServerConfig(Config):
|
||||
config_path=("federation_ip_range_blacklist",),
|
||||
)
|
||||
|
||||
if self.public_baseurl is not None:
|
||||
if self.public_baseurl[-1] != "/":
|
||||
self.public_baseurl += "/"
|
||||
|
||||
# (undocumented) option for torturing the worker-mode replication a bit,
|
||||
# for testing. The value defines the number of milliseconds to pause before
|
||||
# sending out any replication updates.
|
||||
@@ -572,6 +572,7 @@ class ServerConfig(Config):
|
||||
_warn_if_webclient_configured(self.listeners)
|
||||
|
||||
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
||||
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
|
||||
|
||||
@attr.s
|
||||
class LimitRemoteRoomsConfig:
|
||||
@@ -917,6 +918,16 @@ class ServerConfig(Config):
|
||||
#
|
||||
#gc_thresholds: [700, 10, 10]
|
||||
|
||||
# The minimum time in seconds between each GC for a generation, regardless of
|
||||
# the GC thresholds. This ensures that we don't do GC too frequently.
|
||||
#
|
||||
# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
|
||||
# generation 0 GCs, etc.
|
||||
#
|
||||
# Defaults to `[1s, 10s, 30s]`.
|
||||
#
|
||||
#gc_min_interval: [0.5s, 30s, 1m]
|
||||
|
||||
# Set the limit on the returned events in the timeline in the get
|
||||
# and sync operations. The default value is 100. -1 means no upper limit.
|
||||
#
|
||||
@@ -1305,6 +1316,24 @@ class ServerConfig(Config):
|
||||
help="Turn on the twisted telnet manhole service on the given port.",
|
||||
)
|
||||
|
||||
def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]:
|
||||
"""Reads the three durations for the GC min interval option, returning seconds."""
|
||||
if durations is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
if len(durations) != 3:
|
||||
raise ValueError()
|
||||
return (
|
||||
self.parse_duration(durations[0]) / 1000,
|
||||
self.parse_duration(durations[1]) / 1000,
|
||||
self.parse_duration(durations[2]) / 1000,
|
||||
)
|
||||
except Exception:
|
||||
raise ConfigError(
|
||||
"Value of `gc_min_interval` must be a list of three durations if set"
|
||||
)
|
||||
|
||||
|
||||
def is_threepid_reserved(reserved_threepids, threepid):
|
||||
"""Check the threepid against the reserved threepid config
|
||||
|
||||
@@ -17,7 +17,7 @@ import os
|
||||
import warnings
|
||||
from datetime import datetime
|
||||
from hashlib import sha256
|
||||
from typing import List, Optional
|
||||
from typing import List, Optional, Pattern
|
||||
|
||||
from unpaddedbase64 import encode_base64
|
||||
|
||||
@@ -124,7 +124,7 @@ class TlsConfig(Config):
|
||||
fed_whitelist_entries = []
|
||||
|
||||
# Support globs (*) in whitelist values
|
||||
self.federation_certificate_verification_whitelist = [] # type: List[str]
|
||||
self.federation_certificate_verification_whitelist = [] # type: List[Pattern]
|
||||
for entry in fed_whitelist_entries:
|
||||
try:
|
||||
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
|
||||
|
||||
@@ -64,6 +64,14 @@ class WriterLocations:
|
||||
Attributes:
|
||||
events: The instances that write to the event and backfill streams.
|
||||
typing: The instance that writes to the typing stream.
|
||||
to_device: The instances that write to the to_device stream. Currently
|
||||
can only be a single instance.
|
||||
account_data: The instances that write to the account data streams. Currently
|
||||
can only be a single instance.
|
||||
receipts: The instances that write to the receipts stream. Currently
|
||||
can only be a single instance.
|
||||
presence: The instances that write to the presence stream. Currently
|
||||
can only be a single instance.
|
||||
"""
|
||||
|
||||
events = attr.ib(
|
||||
@@ -85,6 +93,11 @@ class WriterLocations:
|
||||
type=List[str],
|
||||
converter=_instance_to_list_converter,
|
||||
)
|
||||
presence = attr.ib(
|
||||
default=["master"],
|
||||
type=List[str],
|
||||
converter=_instance_to_list_converter,
|
||||
)
|
||||
|
||||
|
||||
class WorkerConfig(Config):
|
||||
@@ -188,7 +201,14 @@ class WorkerConfig(Config):
|
||||
|
||||
# Check that the configured writers for events and typing also appears in
|
||||
# `instance_map`.
|
||||
for stream in ("events", "typing", "to_device", "account_data", "receipts"):
|
||||
for stream in (
|
||||
"events",
|
||||
"typing",
|
||||
"to_device",
|
||||
"account_data",
|
||||
"receipts",
|
||||
"presence",
|
||||
):
|
||||
instances = _instance_to_list_converter(getattr(self.writers, stream))
|
||||
for instance in instances:
|
||||
if instance != "master" and instance not in self.instance_map:
|
||||
@@ -215,6 +235,11 @@ class WorkerConfig(Config):
|
||||
if len(self.writers.events) == 0:
|
||||
raise ConfigError("Must specify at least one instance to handle `events`.")
|
||||
|
||||
if len(self.writers.presence) != 1:
|
||||
raise ConfigError(
|
||||
"Must only specify one instance to handle `presence` messages."
|
||||
)
|
||||
|
||||
self.events_shard_config = RoutableShardedWorkerHandlingConfig(
|
||||
self.writers.events
|
||||
)
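For illustration, a worker configuration shaped like the following (instance names invented) satisfies the checks above: every configured writer is either `master` or present in `instance_map`, and `presence` names exactly one instance. A sketch of that validation in plain Python:

# Hypothetical worker configuration, written as the parsed YAML would look.
config = {
    "instance_map": {
        "worker1": {"host": "localhost", "port": 9101},
        "worker2": {"host": "localhost", "port": 9102},
    },
    "stream_writers": {
        "events": ["worker1", "worker2"],  # events may be sharded across writers
        "typing": ["worker1"],
        "presence": ["worker1"],           # must be exactly one instance
    },
}

for stream, instances in config["stream_writers"].items():
    for instance in instances:
        assert instance == "master" or instance in config["instance_map"], instance
assert len(config["stream_writers"]["presence"]) == 1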
@@ -14,14 +14,14 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import List, Optional, Set, Tuple
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
from signedjson.sign import SignatureVerifyException, verify_signed_json
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
from synapse.api.constants import EventTypes, JoinRules, Membership
|
||||
from synapse.api.constants import MAX_PDU_SIZE, EventTypes, JoinRules, Membership
|
||||
from synapse.api.errors import AuthError, EventSizeError, SynapseError
|
||||
from synapse.api.room_versions import (
|
||||
KNOWN_ROOM_VERSIONS,
|
||||
@@ -205,7 +205,7 @@ def _check_size_limits(event: EventBase) -> None:
|
||||
too_big("type")
|
||||
if len(event.event_id) > 255:
|
||||
too_big("event_id")
|
||||
if len(encode_canonical_json(event.get_pdu_json())) > 65536:
|
||||
if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
|
||||
too_big("event")
|
||||
|
||||
|
||||
@@ -670,7 +670,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase
|
||||
public_key = public_key_object["public_key"]
|
||||
try:
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
for key_name in signature_block.keys():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
continue
|
||||
verify_key = decode_verify_key_bytes(
|
||||
@@ -688,7 +688,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase
|
||||
return False
|
||||
|
||||
|
||||
def get_public_keys(invite_event):
|
||||
def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
|
||||
public_keys = []
|
||||
if "public_key" in invite_event.content:
|
||||
o = {"public_key": invite_event.content["public_key"]}
|
||||
|
||||
@@ -15,12 +15,12 @@
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
||||
from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from synapse.rest.media.v1._base import FileInfo
|
||||
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
|
||||
from synapse.spam_checker_api import RegistrationBehaviour
|
||||
from synapse.types import Collection
|
||||
from synapse.types import RoomAlias
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -114,7 +114,9 @@ class SpamChecker:
|
||||
|
||||
return True
|
||||
|
||||
async def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
|
||||
async def user_may_create_room_alias(
|
||||
self, userid: str, room_alias: RoomAlias
|
||||
) -> bool:
|
||||
"""Checks if a given user may create a room alias
|
||||
|
||||
If this method returns false, the association request will be rejected.
|
||||
|
||||
@@ -451,6 +451,28 @@ class FederationClient(FederationBase):
|
||||
|
||||
return signed_auth
|
||||
|
||||
def _is_unknown_endpoint(
|
||||
self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
|
||||
) -> bool:
|
||||
"""
|
||||
Returns true if the response was due to an endpoint being unimplemented.
|
||||
|
||||
Args:
|
||||
e: The error response received from the remote server.
|
||||
synapse_error: The above error converted to a SynapseError. This is
|
||||
automatically generated if not provided.
|
||||
|
||||
"""
|
||||
if synapse_error is None:
|
||||
synapse_error = e.to_synapse_error()
|
||||
# There is no good way to detect an "unknown" endpoint.
|
||||
#
|
||||
# Dendrite returns a 404 (with no body); synapse returns a 400
|
||||
# with M_UNRECOGNISED.
|
||||
return e.code == 404 or (
|
||||
e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
|
||||
)
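A self-contained sketch of the same heuristic with plain stand-ins for `HttpResponseException`/`SynapseError`: a 404 with no structured body, or a 400 carrying the `M_UNRECOGNIZED` errcode, is treated as "endpoint not implemented":

import json

def is_unknown_endpoint(code: int, body: bytes) -> bool:
    # Dendrite answers 404 with no body; Synapse answers 400 with M_UNRECOGNIZED.
    if code == 404:
        return True
    if code == 400:
        try:
            errcode = json.loads(body).get("errcode")
        except (ValueError, AttributeError):
            return False
        return errcode == "M_UNRECOGNIZED"
    return False

print(is_unknown_endpoint(404, b""))                               # True
print(is_unknown_endpoint(400, b'{"errcode": "M_UNRECOGNIZED"}'))  # True
print(is_unknown_endpoint(400, b'{"errcode": "M_FORBIDDEN"}'))     # False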
async def _try_destination_list(
|
||||
self,
|
||||
description: str,
|
||||
@@ -468,9 +490,9 @@ class FederationClient(FederationBase):
|
||||
callback: Function to run for each server. Passed a single
|
||||
argument: the server_name to try.
|
||||
|
||||
If the callback raises a CodeMessageException with a 300/400 code,
|
||||
attempts to perform the operation stop immediately and the exception is
|
||||
reraised.
|
||||
If the callback raises a CodeMessageException with a 300/400 code or
|
||||
an UnsupportedRoomVersionError, attempts to perform the operation
|
||||
stop immediately and the exception is reraised.
|
||||
|
||||
Otherwise, if the callback raises an Exception the error is logged and the
|
||||
next server tried. Normally the stacktrace is logged but this is
|
||||
@@ -492,8 +514,7 @@ class FederationClient(FederationBase):
|
||||
continue
|
||||
|
||||
try:
|
||||
res = await callback(destination)
|
||||
return res
|
||||
return await callback(destination)
|
||||
except InvalidResponseError as e:
|
||||
logger.warning("Failed to %s via %s: %s", description, destination, e)
|
||||
except UnsupportedRoomVersionError:
|
||||
@@ -502,17 +523,15 @@ class FederationClient(FederationBase):
|
||||
synapse_error = e.to_synapse_error()
|
||||
failover = False
|
||||
|
||||
# Failover on an internal server error, or if the destination
|
||||
# doesn't implemented the endpoint for some reason.
|
||||
if 500 <= e.code < 600:
|
||||
failover = True
|
||||
|
||||
elif failover_on_unknown_endpoint:
|
||||
# there is no good way to detect an "unknown" endpoint. Dendrite
|
||||
# returns a 404 (with no body); synapse returns a 400
|
||||
# with M_UNRECOGNISED.
|
||||
if e.code == 404 or (
|
||||
e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
|
||||
):
|
||||
failover = True
|
||||
elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
|
||||
e, synapse_error
|
||||
):
|
||||
failover = True
|
||||
|
||||
if not failover:
|
||||
raise synapse_error from e
|
||||
@@ -570,9 +589,8 @@ class FederationClient(FederationBase):
|
||||
UnsupportedRoomVersionError: if remote responds with
|
||||
a room version we don't understand.
|
||||
|
||||
SynapseError: if the chosen remote server returns a 300/400 code.
|
||||
|
||||
RuntimeError: if no servers were reachable.
|
||||
SynapseError: if the chosen remote server returns a 300/400 code, or
|
||||
no servers successfully handle the request.
|
||||
"""
|
||||
valid_memberships = {Membership.JOIN, Membership.LEAVE}
|
||||
if membership not in valid_memberships:
|
||||
@@ -642,9 +660,8 @@ class FederationClient(FederationBase):
|
||||
``auth_chain``.
|
||||
|
||||
Raises:
|
||||
SynapseError: if the chosen remote server returns a 300/400 code.
|
||||
|
||||
RuntimeError: if no servers were reachable.
|
||||
SynapseError: if the chosen remote server returns a 300/400 code, or
|
||||
no servers successfully handle the request.
|
||||
"""
|
||||
|
||||
async def send_request(destination) -> Dict[str, Any]:
|
||||
@@ -673,7 +690,7 @@ class FederationClient(FederationBase):
|
||||
if create_event is None:
|
||||
# If the state doesn't have a create event then the room is
|
||||
# invalid, and it would fail auth checks anyway.
|
||||
raise SynapseError(400, "No create event in state")
|
||||
raise InvalidResponseError("No create event in state")
|
||||
|
||||
# the room version should be sane.
|
||||
create_room_version = create_event.content.get(
|
||||
@@ -746,16 +763,11 @@ class FederationClient(FederationBase):
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
if e.code in [400, 404]:
|
||||
err = e.to_synapse_error()
|
||||
|
||||
# If we receive an error response that isn't a generic error, or an
|
||||
# unrecognised endpoint error, we assume that the remote understands
|
||||
# the v2 invite API and this is a legitimate error.
|
||||
if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
|
||||
raise err
|
||||
else:
|
||||
raise e.to_synapse_error()
|
||||
# If an error is received that is due to an unrecognised endpoint,
|
||||
# fallback to the v1 endpoint. Otherwise consider it a legitmate error
|
||||
# and raise.
|
||||
if not self._is_unknown_endpoint(e):
|
||||
raise
|
||||
|
||||
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
@@ -802,6 +814,11 @@ class FederationClient(FederationBase):
|
||||
|
||||
Returns:
|
||||
The event as a dict as returned by the remote server
|
||||
|
||||
Raises:
|
||||
SynapseError: if the remote server returns an error or if the server
|
||||
only supports the v1 endpoint and a room version other than "1"
|
||||
or "2" is requested.
|
||||
"""
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
@@ -817,28 +834,19 @@ class FederationClient(FederationBase):
|
||||
},
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
if e.code in [400, 404]:
|
||||
err = e.to_synapse_error()
|
||||
|
||||
# If we receive an error response that isn't a generic error, we
|
||||
# assume that the remote understands the v2 invite API and this
|
||||
# is a legitimate error.
|
||||
if err.errcode != Codes.UNKNOWN:
|
||||
raise err
|
||||
|
||||
# Otherwise, we assume that the remote server doesn't understand
|
||||
# the v2 invite API. That's ok provided the room uses old-style event
|
||||
# IDs.
|
||||
# If an error is received that is due to an unrecognised endpoint,
|
||||
# fallback to the v1 endpoint if the room uses old-style event IDs.
|
||||
# Otherwise consider it a legitmate error and raise.
|
||||
err = e.to_synapse_error()
|
||||
if self._is_unknown_endpoint(e, err):
|
||||
if room_version.event_format != EventFormatVersions.V1:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"User's homeserver does not support this room version",
|
||||
Codes.UNSUPPORTED_ROOM_VERSION,
|
||||
)
|
||||
elif e.code in (403, 429):
|
||||
raise e.to_synapse_error()
|
||||
else:
|
||||
raise
|
||||
raise err
|
||||
|
||||
# Didn't work, try v1 API.
|
||||
# Note the v1 API returns a tuple of `(200, content)`
|
||||
@@ -865,9 +873,8 @@ class FederationClient(FederationBase):
|
||||
pdu: event to be sent
|
||||
|
||||
Raises:
|
||||
SynapseError if the chosen remote server returns a 300/400 code.
|
||||
|
||||
RuntimeError if no servers were reachable.
|
||||
SynapseError: if the chosen remote server returns a 300/400 code, or
|
||||
no servers successfully handle the request.
|
||||
"""
|
||||
|
||||
async def send_request(destination: str) -> None:
|
||||
@@ -889,16 +896,11 @@ class FederationClient(FederationBase):
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
if e.code in [400, 404]:
|
||||
err = e.to_synapse_error()
|
||||
|
||||
# If we receive an error response that isn't a generic error, or an
|
||||
# unrecognised endpoint error, we assume that the remote understands
|
||||
# the v2 invite API and this is a legitimate error.
|
||||
if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
|
||||
raise err
|
||||
else:
|
||||
raise e.to_synapse_error()
|
||||
# If an error is received that is due to an unrecognised endpoint,
|
||||
# fallback to the v1 endpoint. Otherwise consider it a legitmate error
|
||||
# and raise.
|
||||
if not self._is_unknown_endpoint(e):
|
||||
raise
|
||||
|
||||
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
UnsupportedRoomVersionError,
|
||||
)
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
||||
@@ -865,14 +864,6 @@ class FederationHandlerRegistry:
|
||||
# EDU received.
|
||||
self._edu_type_to_instance = {} # type: Dict[str, List[str]]
|
||||
|
||||
# A rate limiter for incoming room key requests per origin.
|
||||
self._room_key_request_rate_limiter = Ratelimiter(
|
||||
store=hs.get_datastore(),
|
||||
clock=self.clock,
|
||||
rate_hz=self.config.rc_key_requests.per_second,
|
||||
burst_count=self.config.rc_key_requests.burst_count,
|
||||
)
|
||||
|
||||
def register_edu_handler(
|
||||
self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
|
||||
) -> None:
|
||||
@@ -926,16 +917,6 @@ class FederationHandlerRegistry:
|
||||
if not self.config.use_presence and edu_type == EduTypes.Presence:
|
||||
return
|
||||
|
||||
# If the incoming room key requests from a particular origin are over
|
||||
# the limit, drop them.
|
||||
if (
|
||||
edu_type == EduTypes.RoomKeyRequest
|
||||
and not await self._room_key_request_rate_limiter.can_do_action(
|
||||
None, origin
|
||||
)
|
||||
):
|
||||
return
|
||||
|
||||
# Check if we have a handler on this instance
|
||||
handler = self.edu_handlers.get(edu_type)
|
||||
if handler:
|
||||
|
||||
synapse/federation/send_queue.py (new file, 510 lines)
@@ -0,0 +1,510 @@
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""A federation sender that forwards things to be sent across replication to
|
||||
a worker process.
|
||||
|
||||
It assumes there is a single worker process feeding off of it.
|
||||
|
||||
Each row in the replication stream consists of a type and some json, where the
|
||||
types indicate whether they are presence, or edus, etc.
|
||||
|
||||
Ephemeral or non-event data are queued up in-memory. When the worker requests
|
||||
updates since a particular point, all in-memory data since before that point is
|
||||
dropped. We also expire things in the queue after 5 minutes, to ensure that a
|
||||
dead worker doesn't cause the queues to grow limitlessly.
|
||||
|
||||
Events are replicated via a separate events stream.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Dict,
|
||||
Hashable,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Sized,
|
||||
Tuple,
|
||||
Type,
|
||||
)
|
||||
|
||||
from sortedcontainers import SortedDict
|
||||
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.federation.sender import AbstractFederationSender, FederationSender
|
||||
from synapse.metrics import LaterGauge
|
||||
from synapse.replication.tcp.streams.federation import FederationStream
|
||||
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
from .units import Edu
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FederationRemoteSendQueue(AbstractFederationSender):
|
||||
"""A drop in replacement for FederationSender"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.server_name = hs.hostname
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
|
||||
# We may have multiple federation sender instances, so we need to track
|
||||
# their positions separately.
|
||||
self._sender_instances = hs.config.worker.federation_shard_config.instances
|
||||
self._sender_positions = {} # type: Dict[str, int]
|
||||
|
||||
# Pending presence map user_id -> UserPresenceState
|
||||
self.presence_map = {} # type: Dict[str, UserPresenceState]
|
||||
|
||||
# Stores the destinations we need to explicitly send presence to about a
|
||||
# given user.
|
||||
# Stream position -> (user_id, destinations)
|
||||
self.presence_destinations = (
|
||||
SortedDict()
|
||||
) # type: SortedDict[int, Tuple[str, Iterable[str]]]
|
||||
|
||||
# (destination, key) -> EDU
|
||||
self.keyed_edu = {} # type: Dict[Tuple[str, tuple], Edu]
|
||||
|
||||
# stream position -> (destination, key)
|
||||
self.keyed_edu_changed = (
|
||||
SortedDict()
|
||||
) # type: SortedDict[int, Tuple[str, tuple]]
|
||||
|
||||
self.edus = SortedDict() # type: SortedDict[int, Edu]
|
||||
|
||||
# stream ID for the next entry into keyed_edu_changed/edus.
|
||||
self.pos = 1
|
||||
|
||||
# map from stream ID to the time that stream entry was generated, so that we
|
||||
# can clear out entries after a while
|
||||
self.pos_time = SortedDict() # type: SortedDict[int, int]
|
||||
|
||||
# EVERYTHING IS SAD. In particular, python only makes new scopes when
|
||||
# we make a new function, so we need to make a new function so the inner
|
||||
# lambda binds to the queue rather than to the name of the queue which
|
||||
# changes. ARGH.
|
||||
def register(name: str, queue: Sized) -> None:
|
||||
LaterGauge(
|
||||
"synapse_federation_send_queue_%s_size" % (queue_name,),
|
||||
"",
|
||||
[],
|
||||
lambda: len(queue),
|
||||
)
|
||||
|
||||
for queue_name in [
|
||||
"presence_map",
|
||||
"keyed_edu",
|
||||
"keyed_edu_changed",
|
||||
"edus",
|
||||
"pos_time",
|
||||
"presence_destinations",
|
||||
]:
|
||||
register(queue_name, getattr(self, queue_name))
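The "EVERYTHING IS SAD" comment refers to Python's late-binding closures: a lambda defined in a loop sees the loop variable's final value unless a helper function (or a default argument) pins the current value. A small demonstration:

queues = {"edus": [1, 2, 3], "presence_map": [1]}

# Without a helper, every lambda reports the length of the *last* queue.
broken = []
for queue_name, queue in queues.items():
    broken.append(lambda: (queue_name, len(queue)))

# A function call (or a default argument) binds the current values instead.
fixed = []
for queue_name, queue in queues.items():
    def make_gauge(name=queue_name, q=queue):
        return lambda: (name, len(q))
    fixed.append(make_gauge())

print([f() for f in broken])  # both report ('presence_map', 1)
print([f() for f in fixed])   # ('edus', 3) and ('presence_map', 1)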
self.clock.looping_call(self._clear_queue, 30 * 1000)
|
||||
|
||||
def _next_pos(self) -> int:
|
||||
pos = self.pos
|
||||
self.pos += 1
|
||||
self.pos_time[self.clock.time_msec()] = pos
|
||||
return pos
|
||||
|
||||
def _clear_queue(self) -> None:
|
||||
"""Clear the queues for anything older than N minutes"""
|
||||
|
||||
FIVE_MINUTES_AGO = 5 * 60 * 1000
|
||||
now = self.clock.time_msec()
|
||||
|
||||
keys = self.pos_time.keys()
|
||||
time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
|
||||
if not keys[:time]:
|
||||
return
|
||||
|
||||
position_to_delete = max(keys[:time])
|
||||
for key in keys[:time]:
|
||||
del self.pos_time[key]
|
||||
|
||||
self._clear_queue_before_pos(position_to_delete)
|
||||
|
||||
def _clear_queue_before_pos(self, position_to_delete: int) -> None:
|
||||
"""Clear all the queues from before a given position"""
|
||||
with Measure(self.clock, "send_queue._clear"):
|
||||
# Delete things out of presence maps
|
||||
keys = self.presence_destinations.keys()
|
||||
i = self.presence_destinations.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.presence_destinations[key]
|
||||
|
||||
user_ids = {user_id for user_id, _ in self.presence_destinations.values()}
|
||||
|
||||
to_del = [
|
||||
user_id for user_id in self.presence_map if user_id not in user_ids
|
||||
]
|
||||
for user_id in to_del:
|
||||
del self.presence_map[user_id]
|
||||
|
||||
# Delete things out of keyed edus
|
||||
keys = self.keyed_edu_changed.keys()
|
||||
i = self.keyed_edu_changed.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.keyed_edu_changed[key]
|
||||
|
||||
live_keys = set()
|
||||
for edu_key in self.keyed_edu_changed.values():
|
||||
live_keys.add(edu_key)
|
||||
|
||||
keys_to_del = [
|
||||
edu_key for edu_key in self.keyed_edu if edu_key not in live_keys
|
||||
]
|
||||
for edu_key in keys_to_del:
|
||||
del self.keyed_edu[edu_key]
|
||||
|
||||
# Delete things out of edu map
|
||||
keys = self.edus.keys()
|
||||
i = self.edus.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.edus[key]
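The expiry logic above follows one pattern throughout: keep a `SortedDict` keyed by position, bisect to find everything below the cut-off, and delete those keys. A minimal sketch of that pattern:

from sortedcontainers import SortedDict

queue = SortedDict({1: "a", 2: "b", 5: "c", 9: "d"})  # position -> payload

def clear_before(pos: int) -> None:
    keys = queue.keys()
    i = queue.bisect_left(pos)   # first index whose key is >= pos
    for key in list(keys[:i]):   # copy, since we mutate while iterating
        del queue[key]

clear_before(5)
print(dict(queue))  # {5: 'c', 9: 'd'}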
def notify_new_events(self, max_token: RoomStreamToken) -> None:
|
||||
"""As per FederationSender"""
|
||||
# This should never get called.
|
||||
raise NotImplementedError()
|
||||
|
||||
def build_and_send_edu(
|
||||
self,
|
||||
destination: str,
|
||||
edu_type: str,
|
||||
content: JsonDict,
|
||||
key: Optional[Hashable] = None,
|
||||
) -> None:
|
||||
"""As per FederationSender"""
|
||||
if destination == self.server_name:
|
||||
logger.info("Not sending EDU to ourselves")
|
||||
return
|
||||
|
||||
pos = self._next_pos()
|
||||
|
||||
edu = Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type=edu_type,
|
||||
content=content,
|
||||
)
|
||||
|
||||
if key:
|
||||
assert isinstance(key, tuple)
|
||||
self.keyed_edu[(destination, key)] = edu
|
||||
self.keyed_edu_changed[pos] = (destination, key)
|
||||
else:
|
||||
self.edus[pos] = edu
|
||||
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
|
||||
"""As per FederationSender
|
||||
|
||||
Args:
|
||||
receipt:
|
||||
"""
|
||||
# nothing to do here: the replication listener will handle it.
|
||||
|
||||
def send_presence_to_destinations(
|
||||
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
|
||||
) -> None:
|
||||
"""As per FederationSender
|
||||
|
||||
Args:
|
||||
states
|
||||
destinations
|
||||
"""
|
||||
for state in states:
|
||||
pos = self._next_pos()
|
||||
self.presence_map.update({state.user_id: state for state in states})
|
||||
self.presence_destinations[pos] = (state.user_id, destinations)
|
||||
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
def send_device_messages(self, destination: str) -> None:
|
||||
"""As per FederationSender"""
|
||||
# We don't need to replicate this as it gets sent down a different
|
||||
# stream.
|
||||
|
||||
def wake_destination(self, server: str) -> None:
|
||||
pass
|
||||
|
||||
def get_current_token(self) -> int:
|
||||
return self.pos - 1
|
||||
|
||||
def federation_ack(self, instance_name: str, token: int) -> None:
|
||||
if self._sender_instances:
|
||||
# If we have configured multiple federation sender instances we need
|
||||
# to track their positions separately, and only clear the queue up
|
||||
# to the token all instances have acked.
|
||||
self._sender_positions[instance_name] = token
|
||||
token = min(self._sender_positions.values())
|
||||
|
||||
self._clear_queue_before_pos(token)
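With several federation sender instances acking independently, the queue may only be trimmed up to the lowest acked token; otherwise a slower sender would lose rows it has not yet processed. A tiny sketch of that bookkeeping:

sender_positions = {}  # instance name -> last acked token

def federation_ack(instance_name: str, token: int) -> int:
    sender_positions[instance_name] = token
    # It is only safe to clear what every instance has confirmed.
    return min(sender_positions.values())

federation_ack("sender-1", 120)
print(federation_ack("sender-2", 95))   # 95: sender-2 is still behind
print(federation_ack("sender-2", 150))  # 120: now sender-1 is the laggard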
async def get_replication_rows(
|
||||
self, instance_name: str, from_token: int, to_token: int, target_row_count: int
|
||||
) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
|
||||
"""Get rows to be sent over federation between the two tokens
|
||||
|
||||
Args:
|
||||
instance_name: the name of the current process
|
||||
from_token: the previous stream token: the starting point for fetching the
|
||||
updates
|
||||
to_token: the new stream token: the point to get updates up to
|
||||
target_row_count: a target for the number of rows to be returned.
|
||||
|
||||
Returns: a triplet `(updates, new_last_token, limited)`, where:
|
||||
* `updates` is a list of `(token, row)` entries.
|
||||
* `new_last_token` is the new position in stream.
|
||||
* `limited` is whether there are more updates to fetch.
|
||||
"""
|
||||
# TODO: Handle target_row_count.
|
||||
|
||||
# To handle restarts where we wrap around
|
||||
if from_token > self.pos:
|
||||
from_token = -1
|
||||
|
||||
# list of tuple(int, BaseFederationRow), where the first is the position
|
||||
# of the federation stream.
|
||||
rows = [] # type: List[Tuple[int, BaseFederationRow]]
|
||||
|
||||
# Fetch presence to send to destinations
|
||||
i = self.presence_destinations.bisect_right(from_token)
|
||||
j = self.presence_destinations.bisect_right(to_token) + 1
|
||||
|
||||
for pos, (user_id, dests) in self.presence_destinations.items()[i:j]:
|
||||
rows.append(
|
||||
(
|
||||
pos,
|
||||
PresenceDestinationsRow(
|
||||
state=self.presence_map[user_id], destinations=list(dests)
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
# Fetch changes keyed edus
|
||||
i = self.keyed_edu_changed.bisect_right(from_token)
|
||||
j = self.keyed_edu_changed.bisect_right(to_token) + 1
|
||||
# We purposefully clobber based on the key here, python dict comprehensions
|
||||
# always use the last value, so this will correctly point to the last
|
||||
# stream position.
|
||||
keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}
|
||||
|
||||
for ((destination, edu_key), pos) in keyed_edus.items():
|
||||
rows.append(
|
||||
(
|
||||
pos,
|
||||
KeyedEduRow(
|
||||
key=edu_key, edu=self.keyed_edu[(destination, edu_key)]
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
# Fetch changed edus
|
||||
i = self.edus.bisect_right(from_token)
|
||||
j = self.edus.bisect_right(to_token) + 1
|
||||
edus = self.edus.items()[i:j]
|
||||
|
||||
for (pos, edu) in edus:
|
||||
rows.append((pos, EduRow(edu)))
|
||||
|
||||
# Sort rows based on pos
|
||||
rows.sort()
|
||||
|
||||
return (
|
||||
[(pos, (row.TypeId, row.to_data())) for pos, row in rows],
|
||||
to_token,
|
||||
False,
|
||||
)
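A hedged sketch of how a consumer could page through `get_replication_rows` (the `sender` object and the row handling are placeholders), asking again from the returned token until `limited` comes back false:

async def drain_federation_stream(sender, instance_name: str, upto_token: int) -> int:
    from_token = -1
    while True:
        updates, from_token, limited = await sender.get_replication_rows(
            instance_name, from_token, upto_token, target_row_count=100
        )
        for token, (type_id, data) in updates:
            print("row", token, type_id, data)  # hand off to the real handler here
        if not limited:
            return from_token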
class BaseFederationRow:
|
||||
"""Base class for rows to be sent in the federation stream.
|
||||
|
||||
Specifies how to identify, serialize and deserialize the different types.
|
||||
"""
|
||||
|
||||
TypeId = "" # Unique string that ids the type. Must be overridden in sub classes.
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
"""Parse the data from the federation stream into a row.
|
||||
|
||||
Args:
|
||||
data: The value of ``data`` from FederationStreamRow.data, type
|
||||
depends on the type of stream
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def to_data(self):
|
||||
"""Serialize this row to be sent over the federation stream.
|
||||
|
||||
Returns:
|
||||
The value to be sent in FederationStreamRow.data. The type depends
|
||||
on the type of stream.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
"""Add this row to the appropriate field in the buffer ready for this
|
||||
to be sent over federation.
|
||||
|
||||
We use a buffer so that we can batch up events that have come in at
|
||||
the same time and send them all at once.
|
||||
|
||||
Args:
|
||||
buff (BufferedToSend)
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class PresenceDestinationsRow(
|
||||
BaseFederationRow,
|
||||
namedtuple(
|
||||
"PresenceDestinationsRow",
|
||||
("state", "destinations"), # UserPresenceState # list[str]
|
||||
),
|
||||
):
|
||||
TypeId = "pd"
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
return PresenceDestinationsRow(
|
||||
state=UserPresenceState.from_dict(data["state"]), destinations=data["dests"]
|
||||
)
|
||||
|
||||
def to_data(self):
|
||||
return {"state": self.state.as_dict(), "dests": self.destinations}
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
buff.presence_destinations.append((self.state, self.destinations))
|
||||
|
||||
|
||||
class KeyedEduRow(
|
||||
BaseFederationRow,
|
||||
namedtuple(
|
||||
"KeyedEduRow",
|
||||
("key", "edu"), # tuple(str) - the edu key passed to send_edu # Edu
|
||||
),
|
||||
):
|
||||
"""Streams EDUs that have an associated key that is ued to clobber. For example,
|
||||
typing EDUs clobber based on room_id.
|
||||
"""
|
||||
|
||||
TypeId = "k"
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
return KeyedEduRow(key=tuple(data["key"]), edu=Edu(**data["edu"]))
|
||||
|
||||
def to_data(self):
|
||||
return {"key": self.key, "edu": self.edu.get_internal_dict()}
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
buff.keyed_edus.setdefault(self.edu.destination, {})[self.key] = self.edu
|
||||
|
||||
|
||||
class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))): # Edu
|
||||
"""Streams EDUs that don't have keys. See KeyedEduRow"""
|
||||
|
||||
TypeId = "e"
|
||||
|
||||
@staticmethod
|
||||
def from_data(data):
|
||||
return EduRow(Edu(**data))
|
||||
|
||||
def to_data(self):
|
||||
return self.edu.get_internal_dict()
|
||||
|
||||
def add_to_buffer(self, buff):
|
||||
buff.edus.setdefault(self.edu.destination, []).append(self.edu)
|
||||
|
||||
|
||||
_rowtypes = (
|
||||
PresenceDestinationsRow,
|
||||
KeyedEduRow,
|
||||
EduRow,
|
||||
) # type: Tuple[Type[BaseFederationRow], ...]
|
||||
|
||||
TypeToRow = {Row.TypeId: Row for Row in _rowtypes}
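The `TypeId`/`to_data`/`from_data` trio gives a simple wire round-trip. A sketch reusing the `Edu`, `EduRow` and `TypeToRow` definitions from the module above (the EDU values are invented):

edu = Edu(
    origin="origin.example.org",
    destination="remote.example.org",
    edu_type="m.typing",
    content={"room_id": "!room:example.org", "typing": True},
)
row = EduRow(edu)

wire = (row.TypeId, row.to_data())            # ("e", {...}) as carried on the stream
parsed = TypeToRow[wire[0]].from_data(wire[1])
assert parsed.edu.destination == "remote.example.org"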
ParsedFederationStreamData = namedtuple(
|
||||
"ParsedFederationStreamData",
|
||||
(
|
||||
"presence_destinations", # list of tuples of UserPresenceState and destinations
|
||||
"keyed_edus", # dict of destination -> { key -> Edu }
|
||||
"edus", # dict of destination -> [Edu]
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def process_rows_for_federation(
|
||||
transaction_queue: FederationSender,
|
||||
rows: List[FederationStream.FederationStreamRow],
|
||||
) -> None:
|
||||
"""Parse a list of rows from the federation stream and put them in the
|
||||
transaction queue ready for sending to the relevant homeservers.
|
||||
|
||||
Args:
|
||||
transaction_queue
|
||||
rows
|
||||
"""
|
||||
|
||||
# The federation stream contains a bunch of different types of
|
||||
# rows that need to be handled differently. We parse the rows, put
|
||||
# them into the appropriate collection and then send them off.
|
||||
|
||||
buff = ParsedFederationStreamData(
|
||||
presence_destinations=[],
|
||||
keyed_edus={},
|
||||
edus={},
|
||||
)
|
||||
|
||||
# Parse the rows in the stream and add to the buffer
|
||||
for row in rows:
|
||||
if row.type not in TypeToRow:
|
||||
logger.error("Unrecognized federation row type %r", row.type)
|
||||
continue
|
||||
|
||||
RowType = TypeToRow[row.type]
|
||||
parsed_row = RowType.from_data(row.data)
|
||||
parsed_row.add_to_buffer(buff)
|
||||
|
||||
for state, destinations in buff.presence_destinations:
|
||||
transaction_queue.send_presence_to_destinations(
|
||||
states=[state], destinations=destinations
|
||||
)
|
||||
|
||||
for edu_map in buff.keyed_edus.values():
|
||||
for key, edu in edu_map.items():
|
||||
transaction_queue.send_edu(edu, key)
|
||||
|
||||
for edu_list in buff.edus.values():
|
||||
for edu in edu_list:
|
||||
transaction_queue.send_edu(edu, None)
|
||||
@@ -18,12 +18,15 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set,
|
||||
|
||||
from prometheus_client import Counter
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
import synapse.metrics
|
||||
from synapse.api.presence import UserPresenceState
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
|
||||
from synapse.federation.sender.transaction_manager import TransactionManager
|
||||
from synapse.federation.units import Edu
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.metrics import (
|
||||
LaterGauge,
|
||||
event_processing_loop_counter,
|
||||
@@ -31,7 +34,7 @@ from synapse.metrics import (
|
||||
events_processed_counter,
|
||||
)
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.types import Collection, JsonDict, ReadReceipt, RoomStreamToken
|
||||
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -123,6 +126,10 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
|
||||
def get_current_token(self) -> int:
|
||||
raise NotImplementedError()
|
||||
|
||||
@abc.abstractmethod
|
||||
def federation_ack(self, instance_name: str, token: int) -> None:
|
||||
raise NotImplementedError()
|
||||
|
||||
@abc.abstractmethod
|
||||
async def get_replication_rows(
|
||||
self, instance_name: str, from_token: int, to_token: int, target_row_count: int
|
||||
@@ -248,27 +255,15 @@ class FederationSender(AbstractFederationSender):
|
||||
if not events and next_token >= self._last_poked_id:
|
||||
break
|
||||
|
||||
async def get_destinations_for_event(
|
||||
event: EventBase,
|
||||
) -> Collection[str]:
|
||||
"""Computes the destinations to which this event must be sent.
|
||||
|
||||
This returns an empty tuple when there are no destinations to send to,
|
||||
or if this event is not from this homeserver and it is not sending
|
||||
it on behalf of another server.
|
||||
|
||||
Will also filter out destinations which this sender is not responsible for,
|
||||
if multiple federation senders exist.
|
||||
"""
|
||||
|
||||
async def handle_event(event: EventBase) -> None:
|
||||
# Only send events for this server.
|
||||
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
|
||||
is_mine = self.is_mine_id(event.sender)
|
||||
if not is_mine and send_on_behalf_of is None:
|
||||
return ()
|
||||
return
|
||||
|
||||
if not event.internal_metadata.should_proactively_send():
|
||||
return ()
|
||||
return
|
||||
|
||||
destinations = None # type: Optional[Set[str]]
|
||||
if not event.prev_event_ids():
|
||||
@@ -303,7 +298,7 @@ class FederationSender(AbstractFederationSender):
|
||||
"Failed to calculate hosts in room for event: %s",
|
||||
event.event_id,
|
||||
)
|
||||
return ()
|
||||
return
|
||||
|
||||
destinations = {
|
||||
d
|
||||
@@ -313,15 +308,17 @@ class FederationSender(AbstractFederationSender):
|
||||
)
|
||||
}
|
||||
|
||||
destinations.discard(self.server_name)
|
||||
|
||||
if send_on_behalf_of is not None:
|
||||
# If we are sending the event on behalf of another server
|
||||
# then it already has the event and there is no reason to
|
||||
# send the event to it.
|
||||
destinations.discard(send_on_behalf_of)
|
||||
|
||||
logger.debug("Sending %s to %r", event, destinations)
|
||||
|
||||
if destinations:
|
||||
await self._send_pdu(event, destinations)
|
||||
|
||||
now = self.clock.time_msec()
|
||||
ts = await self.store.get_received_ts(event.event_id)
|
||||
|
||||
@@ -329,29 +326,24 @@ class FederationSender(AbstractFederationSender):
|
||||
"federation_sender"
|
||||
).observe((now - ts) / 1000)
|
||||
|
||||
return destinations
|
||||
return ()
|
||||
async def handle_room_events(events: Iterable[EventBase]) -> None:
|
||||
with Measure(self.clock, "handle_room_events"):
|
||||
for event in events:
|
||||
await handle_event(event)
|
||||
|
||||
async def get_federatable_events_and_destinations(
|
||||
events: Iterable[EventBase],
|
||||
) -> List[Tuple[EventBase, Collection[str]]]:
|
||||
with Measure(self.clock, "get_destinations_for_events"):
|
||||
# Fetch federation destinations per event,
|
||||
# skip if get_destinations_for_event returns an empty collection,
|
||||
# return list of event->destinations pairs.
|
||||
return [
|
||||
(event, dests)
|
||||
for (event, dests) in [
|
||||
(event, await get_destinations_for_event(event))
|
||||
for event in events
|
||||
]
|
||||
if dests
|
||||
]
|
||||
events_by_room = {} # type: Dict[str, List[EventBase]]
|
||||
for event in events:
|
||||
events_by_room.setdefault(event.room_id, []).append(event)
|
||||
|
||||
events_and_dests = await get_federatable_events_and_destinations(events)
|
||||
|
||||
# Send corresponding events to each destination queue
|
||||
await self._distribute_events(events_and_dests)
|
||||
await make_deferred_yieldable(
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(handle_room_events, evs)
|
||||
for evs in events_by_room.values()
|
||||
],
|
||||
consumeErrors=True,
|
||||
)
|
||||
)
|
||||
|
||||
await self.store.update_federation_out_pos("events", next_token)
|
||||
|
||||
@@ -369,7 +361,7 @@ class FederationSender(AbstractFederationSender):
|
||||
events_processed_counter.inc(len(events))
|
||||
|
||||
event_processing_loop_room_count.labels("federation_sender").inc(
|
||||
len({event.room_id for event in events})
|
||||
len(events_by_room)
|
||||
)
|
||||
|
||||
event_processing_loop_counter.labels("federation_sender").inc()
|
||||
@@ -381,53 +373,34 @@ class FederationSender(AbstractFederationSender):
|
||||
finally:
|
||||
self._is_processing = False
|
||||
|
||||
async def _distribute_events(
|
||||
self,
|
||||
events_and_dests: Iterable[Tuple[EventBase, Collection[str]]],
|
||||
) -> None:
|
||||
"""Distribute events to the respective per_destination queues.
|
||||
async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
|
||||
# We loop through all destinations to see whether we already have
|
||||
# a transaction in progress. If we do, stick it in the pending_pdus
|
||||
# table and we'll get back to it later.
|
||||
|
||||
Also persists last-seen per-room stream_ordering to 'destination_rooms'.
|
||||
destinations = set(destinations)
|
||||
destinations.discard(self.server_name)
|
||||
logger.debug("Sending to: %s", str(destinations))
|
||||
|
||||
Args:
|
||||
events_and_dests: A list of tuples, which are (event: EventBase, destinations: Collection[str]).
|
||||
Every event is paired with its intended destinations (in federation).
|
||||
"""
|
||||
# Tuples of room_id + destination to their max-seen stream_ordering
|
||||
room_with_dest_stream_ordering = {} # type: Dict[Tuple[str, str], int]
|
||||
if not destinations:
|
||||
return
|
||||
|
||||
# List of events to send to each destination
|
||||
events_by_dest = {} # type: Dict[str, List[EventBase]]
|
||||
sent_pdus_destination_dist_total.inc(len(destinations))
|
||||
sent_pdus_destination_dist_count.inc()
|
||||
|
||||
# For each event-destinations pair...
|
||||
for event, destinations in events_and_dests:
|
||||
assert pdu.internal_metadata.stream_ordering
|
||||
|
||||
# (we got this from the database, it's filled)
|
||||
assert event.internal_metadata.stream_ordering
|
||||
|
||||
sent_pdus_destination_dist_total.inc(len(destinations))
|
||||
sent_pdus_destination_dist_count.inc()
|
||||
|
||||
# ...iterate over those destinations..
|
||||
for destination in destinations:
|
||||
# ...update their stream-ordering...
|
||||
room_with_dest_stream_ordering[(event.room_id, destination)] = max(
|
||||
event.internal_metadata.stream_ordering,
|
||||
room_with_dest_stream_ordering.get((event.room_id, destination), 0),
|
||||
)
|
||||
|
||||
# ...and add the event to each destination queue.
|
||||
events_by_dest.setdefault(destination, []).append(event)
|
||||
|
||||
# Bulk-store destination_rooms stream_ids
|
||||
await self.store.bulk_store_destination_rooms_entries(
|
||||
room_with_dest_stream_ordering
|
||||
# track the fact that we have a PDU for these destinations,
|
||||
# to allow us to perform catch-up later on if the remote is unreachable
|
||||
# for a while.
|
||||
await self.store.store_destination_rooms_entries(
|
||||
destinations,
|
||||
pdu.room_id,
|
||||
pdu.internal_metadata.stream_ordering,
|
||||
)
|
||||
|
||||
for destination, pdus in events_by_dest.items():
|
||||
logger.debug("Sending %d pdus to %s", len(pdus), destination)
|
||||
|
||||
self._get_per_destination_queue(destination).send_pdus(pdus)
|
||||
for destination in destinations:
|
||||
self._get_per_destination_queue(destination).send_pdu(pdu)
|
||||
|
||||
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
|
||||
"""Send a RR to any other servers in the room
|
||||
@@ -535,6 +508,10 @@ class FederationSender(AbstractFederationSender):
|
||||
# No-op if presence is disabled.
|
||||
return
|
||||
|
||||
# Ensure we only send out presence states for local users.
|
||||
for state in states:
|
||||
assert self.is_mine_id(state.user_id)
|
||||
|
||||
for destination in destinations:
|
||||
if destination == self.server_name:
|
||||
continue
|
||||
@@ -631,6 +608,10 @@ class FederationSender(AbstractFederationSender):
|
||||
# to a worker.
|
||||
return 0
|
||||
|
||||
def federation_ack(self, instance_name: str, token: int) -> None:
|
||||
# It is not expected that this gets called on FederationSender.
|
||||
raise NotImplementedError()
|
||||
|
||||
@staticmethod
|
||||
async def get_replication_rows(
|
||||
instance_name: str, from_token: int, to_token: int, target_row_count: int
|
||||
|
||||
@@ -28,6 +28,7 @@ from synapse.api.presence import UserPresenceState
|
||||
from synapse.events import EventBase
|
||||
from synapse.federation.units import Edu
|
||||
from synapse.handlers.presence import format_user_presence_state
|
||||
from synapse.logging import issue9533_logger
|
||||
from synapse.logging.opentracing import SynapseTags, set_tag
|
||||
from synapse.metrics import sent_transactions_counter
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
@@ -154,22 +155,19 @@ class PerDestinationQueue:
|
||||
+ len(self._pending_edus_keyed)
|
||||
)
|
||||
|
||||
def send_pdus(self, pdus: Iterable[EventBase]) -> None:
|
||||
"""Add PDUs to the queue, and start the transmission loop if necessary
|
||||
def send_pdu(self, pdu: EventBase) -> None:
|
||||
"""Add a PDU to the queue, and start the transmission loop if necessary
|
||||
|
||||
Args:
|
||||
pdus: pdus to send
|
||||
pdu: pdu to send
|
||||
"""
|
||||
if not self._catching_up or self._last_successful_stream_ordering is None:
|
||||
# only enqueue the PDU if we are not catching up (False) or do not
|
||||
# yet know if we have anything to catch up (None)
|
||||
self._pending_pdus.extend(pdus)
|
||||
self._pending_pdus.append(pdu)
|
||||
else:
|
||||
self._catchup_last_skipped = max(
|
||||
pdu.internal_metadata.stream_ordering
|
||||
for pdu in pdus
|
||||
if pdu.internal_metadata.stream_ordering is not None
|
||||
)
|
||||
assert pdu.internal_metadata.stream_ordering
|
||||
self._catchup_last_skipped = pdu.internal_metadata.stream_ordering
|
||||
|
||||
self.attempt_new_transaction()
|
||||
|
||||
@@ -577,6 +575,14 @@ class PerDestinationQueue:
|
||||
for content in contents
|
||||
]
|
||||
|
||||
if edus:
|
||||
issue9533_logger.debug(
|
||||
"Sending %i to-device messages to %s, up to stream id %i",
|
||||
len(edus),
|
||||
self._destination,
|
||||
stream_id,
|
||||
)
|
||||
|
||||
return (edus, stream_id)
|
||||
|
||||
def _start_catching_up(self) -> None:
|
||||
|
||||
@@ -995,6 +995,7 @@ class TransportLayerClient:
|
||||
returned per space
|
||||
exclude_rooms: a list of any rooms we can skip
|
||||
"""
|
||||
# TODO When switching to the stable endpoint, use GET instead of POST.
|
||||
path = _create_path(
|
||||
FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id
|
||||
)
|
||||
|
||||
@@ -1376,6 +1376,32 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
|
||||
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
|
||||
PATH = "/spaces/(?P<room_id>[^/]*)"
|
||||
|
||||
async def on_GET(
|
||||
self,
|
||||
origin: str,
|
||||
content: JsonDict,
|
||||
query: Mapping[bytes, Sequence[bytes]],
|
||||
room_id: str,
|
||||
) -> Tuple[int, JsonDict]:
|
||||
suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
|
||||
max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
|
||||
|
||||
exclude_rooms = []
|
||||
if b"exclude_rooms" in query:
|
||||
try:
|
||||
exclude_rooms = [
|
||||
room_id.decode("ascii") for room_id in query[b"exclude_rooms"]
|
||||
]
|
||||
except Exception:
|
||||
raise SynapseError(
|
||||
400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM
|
||||
)
|
||||
|
||||
return 200, await self.handler.federation_space_summary(
|
||||
room_id, suggested_only, max_rooms_per_space, exclude_rooms
|
||||
)
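For reference, a sketch of the query-string handling the new GET handler implies: values arrive as lists of byte strings, hence the ascii decode (the example query is invented):

from urllib.parse import parse_qs

raw = "suggested_only=true&exclude_rooms=!a:example.org&exclude_rooms=!b:example.org"
query = {k.encode(): [v.encode() for v in vs] for k, vs in parse_qs(raw).items()}

exclude_rooms = [room_id.decode("ascii") for room_id in query.get(b"exclude_rooms", [])]
print(exclude_rooms)  # ['!a:example.org', '!b:example.org']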
# TODO When switching to the stable endpoint, remove the POST handler.
|
||||
async def on_POST(
|
||||
self,
|
||||
origin: str,
|
||||
|
||||
@@ -17,7 +17,7 @@ import email.utils
|
||||
import logging
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.mime.text import MIMEText
|
||||
from typing import TYPE_CHECKING, List, Optional
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple
|
||||
|
||||
from synapse.api.errors import StoreError, SynapseError
|
||||
from synapse.logging.context import make_deferred_yieldable
|
||||
@@ -39,28 +39,44 @@ class AccountValidityHandler:
|
||||
self.sendmail = self.hs.get_sendmail()
|
||||
self.clock = self.hs.get_clock()
|
||||
|
||||
self._account_validity = self.hs.config.account_validity
|
||||
self._account_validity_enabled = (
|
||||
hs.config.account_validity.account_validity_enabled
|
||||
)
|
||||
self._account_validity_renew_by_email_enabled = (
|
||||
hs.config.account_validity.account_validity_renew_by_email_enabled
|
||||
)
|
||||
|
||||
self._account_validity_period = None
|
||||
if self._account_validity_enabled:
|
||||
self._account_validity_period = (
|
||||
hs.config.account_validity.account_validity_period
|
||||
)
|
||||
|
||||
if (
|
||||
self._account_validity.enabled
|
||||
and self._account_validity.renew_by_email_enabled
|
||||
self._account_validity_enabled
|
||||
and self._account_validity_renew_by_email_enabled
|
||||
):
|
||||
# Don't do email-specific configuration if renewal by email is disabled.
|
||||
self._template_html = self.config.account_validity_template_html
|
||||
self._template_text = self.config.account_validity_template_text
|
||||
self._template_html = (
|
||||
hs.config.account_validity.account_validity_template_html
|
||||
)
|
||||
self._template_text = (
|
||||
hs.config.account_validity.account_validity_template_text
|
||||
)
|
||||
account_validity_renew_email_subject = (
|
||||
hs.config.account_validity.account_validity_renew_email_subject
|
||||
)
|
||||
|
||||
try:
|
||||
app_name = self.hs.config.email_app_name
|
||||
app_name = hs.config.email_app_name
|
||||
|
||||
self._subject = self._account_validity.renew_email_subject % {
|
||||
"app": app_name
|
||||
}
|
||||
self._subject = account_validity_renew_email_subject % {"app": app_name}
|
||||
|
||||
self._from_string = self.hs.config.email_notif_from % {"app": app_name}
|
||||
self._from_string = hs.config.email_notif_from % {"app": app_name}
|
||||
except Exception:
|
||||
# If substitution failed, fall back to the bare strings.
|
||||
self._subject = self._account_validity.renew_email_subject
|
||||
self._from_string = self.hs.config.email_notif_from
|
||||
self._subject = account_validity_renew_email_subject
|
||||
self._from_string = hs.config.email_notif_from
|
||||
|
||||
self._raw_from = email.utils.parseaddr(self._from_string)[1]
|
||||
|
||||
@@ -220,50 +236,87 @@ class AccountValidityHandler:
|
||||
attempts += 1
|
||||
raise StoreError(500, "Couldn't generate a unique string as refresh string.")
|
||||
|
||||
async def renew_account(self, renewal_token: str) -> bool:
|
||||
async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]:
|
||||
"""Renews the account attached to a given renewal token by pushing back the
|
||||
expiration date by the current validity period in the server's configuration.
|
||||
|
||||
If it turns out that the token is valid but has already been used, then the
|
||||
token is considered stale. A token is stale if the 'token_used_ts_ms' db column
|
||||
is non-null.
|
||||
|
||||
Args:
|
||||
renewal_token: Token sent with the renewal request.
|
||||
Returns:
|
||||
Whether the provided token is valid.
|
||||
A tuple containing:
|
||||
* A bool representing whether the token is valid and unused.
|
||||
* A bool which is `True` if the token is valid, but stale.
|
||||
* An int representing the user's expiry timestamp as milliseconds since the
|
||||
epoch, or 0 if the token was invalid.
|
||||
"""
|
||||
try:
|
||||
user_id = await self.store.get_user_from_renewal_token(renewal_token)
|
||||
(
|
||||
user_id,
|
||||
current_expiration_ts,
|
||||
token_used_ts,
|
||||
) = await self.store.get_user_from_renewal_token(renewal_token)
|
||||
except StoreError:
|
||||
return False
|
||||
return False, False, 0
|
||||
|
||||
# Check whether this token has already been used.
|
||||
if token_used_ts:
|
||||
logger.info(
|
||||
"User '%s' attempted to use previously used token '%s' to renew account",
|
||||
user_id,
|
||||
renewal_token,
|
||||
)
|
||||
return False, True, current_expiration_ts
|
||||
|
||||
logger.debug("Renewing an account for user %s", user_id)
|
||||
await self.renew_account_for_user(user_id)
|
||||
|
||||
return True
|
||||
# Renew the account. Pass the renewal_token here so that it is not cleared.
|
||||
# We want to keep the token around in case the user attempts to renew their
|
||||
# account with the same token twice (clicking the email link twice).
|
||||
#
|
||||
# In that case, the token will be accepted, but the account's expiration ts
|
||||
# will remain unchanged.
|
||||
new_expiration_ts = await self.renew_account_for_user(
|
||||
user_id, renewal_token=renewal_token
|
||||
)
|
||||
|
||||
return True, False, new_expiration_ts
|
|
||||
self,
|
||||
user_id: str,
|
||||
expiration_ts: Optional[int] = None,
|
||||
email_sent: bool = False,
|
||||
renewal_token: Optional[str] = None,
|
||||
) -> int:
|
||||
"""Renews the account attached to a given user by pushing back the
|
||||
expiration date by the current validity period in the server's
|
||||
configuration.
|
||||
|
||||
Args:
|
||||
renewal_token: Token sent with the renewal request.
|
||||
user_id: The ID of the user to renew.
|
||||
expiration_ts: New expiration date. Defaults to now + validity period.
|
||||
email_sen: Whether an email has been sent for this validity period.
|
||||
Defaults to False.
|
||||
email_sent: Whether an email has been sent for this validity period.
|
||||
renewal_token: Token sent with the renewal request. The user's token
|
||||
will be cleared if this is None.
|
||||
|
||||
Returns:
|
||||
New expiration date for this account, as a timestamp in
|
||||
milliseconds since epoch.
|
||||
"""
|
||||
now = self.clock.time_msec()
|
||||
if expiration_ts is None:
|
||||
expiration_ts = self.clock.time_msec() + self._account_validity.period
|
||||
expiration_ts = now + self._account_validity_period
|
||||
|
||||
await self.store.set_account_validity_for_user(
|
||||
user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
|
||||
user_id=user_id,
|
||||
expiration_ts=expiration_ts,
|
||||
email_sent=email_sent,
|
||||
renewal_token=renewal_token,
|
||||
token_used_ts=now,
|
||||
)
|
||||
|
||||
return expiration_ts
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Dict, List, Optional, Union
|
||||
from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Union
|
||||
|
||||
from prometheus_client import Counter
|
||||
|
||||
@@ -33,7 +33,7 @@ from synapse.metrics.background_process_metrics import (
|
||||
wrap_as_background_process,
|
||||
)
|
||||
from synapse.storage.databases.main.directory import RoomAliasMapping
|
||||
from synapse.types import Collection, JsonDict, RoomAlias, RoomStreamToken, UserID
|
||||
from synapse.types import JsonDict, RoomAlias, RoomStreamToken, UserID
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
if TYPE_CHECKING:
|
||||
|
||||
@@ -17,6 +17,7 @@ import logging
|
||||
import time
|
||||
import unicodedata
|
||||
import urllib.parse
|
||||
from binascii import crc32
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
@@ -34,6 +35,7 @@ from typing import (
|
||||
import attr
|
||||
import bcrypt
|
||||
import pymacaroons
|
||||
import unpaddedbase64
|
||||
|
||||
from twisted.web.server import Request
|
||||
|
||||
@@ -66,6 +68,7 @@ from synapse.util import stringutils as stringutils
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
|
||||
from synapse.util.msisdn import phone_number_to_msisdn
|
||||
from synapse.util.stringutils import base62_encode
|
||||
from synapse.util.threepids import canonicalise_email
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -808,10 +811,12 @@ class AuthHandler(BaseHandler):
|
||||
logger.info(
|
||||
"Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry
|
||||
)
|
||||
target_user_id_obj = UserID.from_string(puppets_user_id)
|
||||
else:
|
||||
logger.info(
|
||||
"Logging in user %s on device %s%s", user_id, device_id, fmt_expiry
|
||||
)
|
||||
target_user_id_obj = UserID.from_string(user_id)
|
||||
|
||||
if (
|
||||
not is_appservice_ghost
|
||||
@@ -819,7 +824,7 @@ class AuthHandler(BaseHandler):
|
||||
):
|
||||
await self.auth.check_auth_blocking(user_id)
|
||||
|
||||
access_token = self.macaroon_gen.generate_access_token(user_id)
|
||||
access_token = self.generate_access_token(target_user_id_obj)
|
||||
await self.store.add_access_token_to_user(
|
||||
user_id=user_id,
|
||||
token=access_token,
|
||||
@@ -1192,6 +1197,19 @@ class AuthHandler(BaseHandler):
|
||||
return None
|
||||
return user_id
|
||||
|
||||
def generate_access_token(self, for_user: UserID) -> str:
|
||||
"""Generates an opaque string, for use as an access token"""
|
||||
|
||||
# we use the following format for access tokens:
|
||||
# syt_<base64 local part>_<random string>_<base62 crc check>
|
||||
|
||||
b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
|
||||
random_string = stringutils.random_string(20)
|
||||
base = f"syt_{b64local}_{random_string}"
|
||||
|
||||
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
|
||||
return f"{base}_{crc}"
|
||||
||||
self, login_token: str
|
||||
) -> LoginTokenAttributes:
|
||||
@@ -1248,7 +1266,7 @@ class AuthHandler(BaseHandler):
|
||||
|
||||
# see if any of our auth providers want to know about this
|
||||
for provider in self.password_providers:
|
||||
for token, token_id, device_id in tokens_and_devices:
|
||||
for token, _, device_id in tokens_and_devices:
|
||||
await provider.on_logged_out(
|
||||
user_id=user_id, device_id=device_id, access_token=token
|
||||
)
|
||||
@@ -1585,10 +1603,7 @@ class MacaroonGenerator:
|
||||
|
||||
hs = attr.ib()
|
||||
|
||||
def generate_access_token(
|
||||
self, user_id: str, extra_caveats: Optional[List[str]] = None
|
||||
) -> str:
|
||||
extra_caveats = extra_caveats or []
|
||||
def generate_guest_access_token(self, user_id: str) -> str:
|
||||
macaroon = self._generate_base_macaroon(user_id)
|
||||
macaroon.add_first_party_caveat("type = access")
|
||||
# Include a nonce, to make sure that each login gets a different
|
||||
@@ -1596,8 +1611,7 @@ class MacaroonGenerator:
|
||||
macaroon.add_first_party_caveat(
|
||||
"nonce = %s" % (stringutils.random_string_with_symbols(16),)
|
||||
)
|
||||
for caveat in extra_caveats:
|
||||
macaroon.add_first_party_caveat(caveat)
|
||||
macaroon.add_first_party_caveat("guest = true")
|
||||
return macaroon.serialize()
|
||||
|
||||
def generate_short_term_login_token(
|
||||
|
||||
@@ -49,7 +49,9 @@ class DeactivateAccountHandler(BaseHandler):
if hs.config.run_background_tasks:
hs.get_reactor().callWhenRunning(self._start_user_parting)

self._account_validity_enabled = hs.config.account_validity.enabled
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
)

async def deactivate_account(
self,

@@ -14,7 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
|
||||
from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
from synapse.api import errors
|
||||
from synapse.api.constants import EventTypes
|
||||
@@ -28,7 +28,6 @@ from synapse.api.errors import (
|
||||
from synapse.logging.opentracing import log_kv, set_tag, trace
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.types import (
|
||||
Collection,
|
||||
JsonDict,
|
||||
StreamToken,
|
||||
UserID,
|
||||
@@ -156,8 +155,7 @@ class DeviceWorkerHandler(BaseHandler):
|
||||
# The user may have left the room
|
||||
# TODO: Check if they actually did or if we were just invited.
|
||||
if room_id not in room_ids:
|
||||
for key, event_id in current_state_ids.items():
|
||||
etype, state_key = key
|
||||
for etype, state_key in current_state_ids.keys():
|
||||
if etype != EventTypes.Member:
|
||||
continue
|
||||
possibly_left.add(state_key)
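
The rewritten loops work because a state map is keyed on `(event_type, state_key)` tuples, so the key can be unpacked directly and the unused event IDs never need to be fetched. A small illustrative sketch:

current_state_ids = {
    ("m.room.member", "@alice:example.org"): "$membership_event_id",
    ("m.room.name", ""): "$name_event_id",
}

possibly_left = {
    state_key
    for etype, state_key in current_state_ids.keys()
    if etype == "m.room.member"
}
# possibly_left == {"@alice:example.org"}
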
@@ -179,8 +177,7 @@ class DeviceWorkerHandler(BaseHandler):
|
||||
log_kv(
|
||||
{"event": "encountered empty previous state", "room_id": room_id}
|
||||
)
|
||||
for key, event_id in current_state_ids.items():
|
||||
etype, state_key = key
|
||||
for etype, state_key in current_state_ids.keys():
|
||||
if etype != EventTypes.Member:
|
||||
continue
|
||||
possibly_changed.add(state_key)
|
||||
@@ -198,8 +195,7 @@ class DeviceWorkerHandler(BaseHandler):
|
||||
for state_dict in prev_state_ids.values():
|
||||
member_event = state_dict.get((EventTypes.Member, user_id), None)
|
||||
if not member_event or member_event != current_member_id:
|
||||
for key, event_id in current_state_ids.items():
|
||||
etype, state_key = key
|
||||
for etype, state_key in current_state_ids.keys():
|
||||
if etype != EventTypes.Member:
|
||||
continue
|
||||
possibly_changed.add(state_key)
|
||||
@@ -484,7 +480,7 @@ class DeviceHandler(DeviceWorkerHandler):
|
||||
"device_list_key", position, users=[user_id], rooms=room_ids
|
||||
)
|
||||
|
||||
if hosts and self.federation_sender:
|
||||
if hosts:
|
||||
logger.info(
|
||||
"Sending device list update notif for %r to: %r", user_id, hosts
|
||||
)
|
||||
@@ -714,7 +710,7 @@ class DeviceListUpdater:
|
||||
# This can happen since we batch updates
|
||||
return
|
||||
|
||||
for device_id, stream_id, prev_ids, content in pending_updates:
|
||||
for device_id, stream_id, prev_ids, _ in pending_updates:
|
||||
logger.debug(
|
||||
"Handling update %r/%r, ID: %r, prev: %r ",
|
||||
user_id,
|
||||
@@ -740,7 +736,7 @@ class DeviceListUpdater:
|
||||
else:
|
||||
# Simply update the single device, since we know that is the only
|
||||
# change (because of the single prev_id matching the current cache)
|
||||
for device_id, stream_id, prev_ids, content in pending_updates:
|
||||
for device_id, stream_id, _, content in pending_updates:
|
||||
await self.store.update_remote_device_list_cache_entry(
|
||||
user_id, device_id, content, stream_id
|
||||
)
|
||||
@@ -929,6 +925,10 @@ class DeviceListUpdater:
|
||||
else:
|
||||
cached_devices = await self.store.get_cached_devices_for_user(user_id)
|
||||
if cached_devices == {d["device_id"]: d for d in devices}:
|
||||
logging.info(
|
||||
"Skipping device list resync for %s, as our cache matches already",
|
||||
user_id,
|
||||
)
|
||||
devices = []
|
||||
ignore_devices = True
|
||||
|
||||
@@ -944,6 +944,9 @@ class DeviceListUpdater:
|
||||
await self.store.update_remote_device_list_cache(
|
||||
user_id, devices, stream_id
|
||||
)
|
||||
# mark the cache as valid, whether or not we actually processed any device
|
||||
# list updates.
|
||||
await self.store.mark_remote_user_device_cache_as_valid(user_id)
|
||||
device_ids = [device["device_id"] for device in devices]
|
||||
|
||||
# Handle cross-signing keys.
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Dict
|
||||
|
||||
from synapse.api.constants import EduTypes
|
||||
from synapse.api.constants import ToDeviceEventTypes
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.logging.context import run_in_background
|
||||
@@ -51,7 +51,9 @@ class DeviceMessageHandler:
|
||||
# same instance. Other federation sender instances will get notified by
|
||||
# `synapse.app.generic_worker.FederationSenderHandler` when it sees it
|
||||
# in the to-device replication stream.
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
self.federation_sender = None
|
||||
if hs.should_send_federation():
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
# If we can handle the to device EDUs we do so, otherwise we route them
|
||||
# to the appropriate worker.
|
||||
@@ -77,6 +79,8 @@ class DeviceMessageHandler:
|
||||
ReplicationUserDevicesResyncRestServlet.make_client(hs)
|
||||
)
|
||||
|
||||
# a rate limiter for room key requests. The keys are
|
||||
# (sending_user_id, sending_device_id).
|
||||
self._ratelimiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=hs.get_clock(),
|
||||
@@ -98,12 +102,25 @@ class DeviceMessageHandler:
|
||||
for user_id, by_device in content["messages"].items():
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if not self.is_mine(UserID.from_string(user_id)):
|
||||
logger.warning("Request for keys for non-local user %s", user_id)
|
||||
logger.warning("To-device message to non-local user %s", user_id)
|
||||
raise SynapseError(400, "Not a user here")
|
||||
|
||||
if not by_device:
|
||||
continue
|
||||
|
||||
# Ratelimit key requests by the sending user.
|
||||
if message_type == ToDeviceEventTypes.RoomKeyRequest:
|
||||
allowed, _ = await self._ratelimiter.can_do_action(
|
||||
None, (sender_user_id, None)
|
||||
)
|
||||
if not allowed:
|
||||
logger.info(
|
||||
"Dropping room_key_request from %s to %s due to rate limit",
|
||||
sender_user_id,
|
||||
user_id,
|
||||
)
|
||||
continue
|
||||
|
||||
messages_by_device = {
|
||||
device_id: {
|
||||
"content": message_content,
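
For reference, the `content["messages"]` mapping iterated above nests target user -> device -> message body; the new rate limit is keyed on the *sending* user, so one noisy sender is dropped regardless of how many targets it lists. A hypothetical payload (the `m.room_key_request` field names are illustrative):

content = {
    "messages": {
        "@alice:example.org": {
            "ALICEDEVICE": {
                "action": "request",
                "requesting_device_id": "BOBDEVICE",
                "request_id": "m1234",
            },
        },
    },
}
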
@@ -190,13 +207,19 @@ class DeviceMessageHandler:
|
||||
for user_id, by_device in messages.items():
|
||||
# Ratelimit local cross-user key requests by the sending device.
|
||||
if (
|
||||
message_type == EduTypes.RoomKeyRequest
|
||||
message_type == ToDeviceEventTypes.RoomKeyRequest
|
||||
and user_id != sender_user_id
|
||||
and await self._ratelimiter.can_do_action(
|
||||
):
|
||||
allowed, _ = await self._ratelimiter.can_do_action(
|
||||
requester, (sender_user_id, requester.device_id)
|
||||
)
|
||||
):
|
||||
continue
|
||||
if not allowed:
|
||||
logger.info(
|
||||
"Dropping room_key_request from %s to %s due to rate limit",
|
||||
sender_user_id,
|
||||
user_id,
|
||||
)
|
||||
continue
|
||||
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if self.is_mine(UserID.from_string(user_id)):
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
import logging
|
||||
import string
|
||||
from typing import Iterable, List, Optional
|
||||
from typing import TYPE_CHECKING, Iterable, List, Optional
|
||||
|
||||
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
|
||||
from synapse.api.errors import (
|
||||
@@ -27,15 +27,19 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.appservice import ApplicationService
|
||||
from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id
|
||||
from synapse.storage.databases.main.directory import RoomAliasMapping
|
||||
from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id
|
||||
|
||||
from ._base import BaseHandler
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DirectoryHandler(BaseHandler):
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
self.state = hs.get_state_handler()
|
||||
@@ -60,7 +64,7 @@ class DirectoryHandler(BaseHandler):
|
||||
room_id: str,
|
||||
servers: Optional[Iterable[str]] = None,
|
||||
creator: Optional[str] = None,
|
||||
):
|
||||
) -> None:
|
||||
# general association creation for both human users and app services
|
||||
|
||||
for wchar in string.whitespace:
|
||||
@@ -74,7 +78,7 @@ class DirectoryHandler(BaseHandler):
|
||||
# TODO(erikj): Add transactions.
|
||||
# TODO(erikj): Check if there is a current association.
|
||||
if not servers:
|
||||
users = await self.state.get_current_users_in_room(room_id)
|
||||
users = await self.store.get_users_in_room(room_id)
|
||||
servers = {get_domain_from_id(u) for u in users}
|
||||
|
||||
if not servers:
|
||||
@@ -104,8 +108,9 @@ class DirectoryHandler(BaseHandler):
|
||||
"""
|
||||
|
||||
user_id = requester.user.to_string()
|
||||
room_alias_str = room_alias.to_string()
|
||||
|
||||
if len(room_alias.to_string()) > MAX_ALIAS_LENGTH:
|
||||
if len(room_alias_str) > MAX_ALIAS_LENGTH:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH,
|
||||
@@ -114,7 +119,7 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
service = requester.app_service
|
||||
if service:
|
||||
if not service.is_interested_in_alias(room_alias.to_string()):
|
||||
if not service.is_interested_in_alias(room_alias_str):
|
||||
raise SynapseError(
|
||||
400,
|
||||
"This application service has not reserved this kind of alias.",
|
||||
@@ -138,7 +143,7 @@ class DirectoryHandler(BaseHandler):
|
||||
raise AuthError(403, "This user is not permitted to create this alias")
|
||||
|
||||
if not self.config.is_alias_creation_allowed(
|
||||
user_id, room_id, room_alias.to_string()
|
||||
user_id, room_id, room_alias_str
|
||||
):
|
||||
# Let's just return a generic message, as there may be all sorts of
|
||||
# reasons why we said no. TODO: Allow configurable error messages
|
||||
@@ -211,7 +216,7 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
async def delete_appservice_association(
|
||||
self, service: ApplicationService, room_alias: RoomAlias
|
||||
):
|
||||
) -> None:
|
||||
if not service.is_interested_in_alias(room_alias.to_string()):
|
||||
raise SynapseError(
|
||||
400,
|
||||
@@ -220,7 +225,7 @@ class DirectoryHandler(BaseHandler):
|
||||
)
|
||||
await self._delete_association(room_alias)
|
||||
|
||||
async def _delete_association(self, room_alias: RoomAlias):
|
||||
async def _delete_association(self, room_alias: RoomAlias) -> str:
|
||||
if not self.hs.is_mine(room_alias):
|
||||
raise SynapseError(400, "Room alias must be local")
|
||||
|
||||
@@ -228,17 +233,19 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
return room_id
|
||||
|
||||
async def get_association(self, room_alias: RoomAlias):
|
||||
async def get_association(self, room_alias: RoomAlias) -> JsonDict:
|
||||
room_id = None
|
||||
if self.hs.is_mine(room_alias):
|
||||
result = await self.get_association_from_room_alias(room_alias)
|
||||
result = await self.get_association_from_room_alias(
|
||||
room_alias
|
||||
) # type: Optional[RoomAliasMapping]
|
||||
|
||||
if result:
|
||||
room_id = result.room_id
|
||||
servers = result.servers
|
||||
else:
|
||||
try:
|
||||
result = await self.federation.make_query(
|
||||
fed_result = await self.federation.make_query(
|
||||
destination=room_alias.domain,
|
||||
query_type="directory",
|
||||
args={"room_alias": room_alias.to_string()},
|
||||
@@ -248,13 +255,13 @@ class DirectoryHandler(BaseHandler):
|
||||
except CodeMessageException as e:
|
||||
logging.warning("Error retrieving alias")
|
||||
if e.code == 404:
|
||||
result = None
|
||||
fed_result = None
|
||||
else:
|
||||
raise
|
||||
|
||||
if result and "room_id" in result and "servers" in result:
|
||||
room_id = result["room_id"]
|
||||
servers = result["servers"]
|
||||
if fed_result and "room_id" in fed_result and "servers" in fed_result:
|
||||
room_id = fed_result["room_id"]
|
||||
servers = fed_result["servers"]
|
||||
|
||||
if not room_id:
|
||||
raise SynapseError(
|
||||
@@ -263,7 +270,7 @@ class DirectoryHandler(BaseHandler):
|
||||
Codes.NOT_FOUND,
|
||||
)
|
||||
|
||||
users = await self.state.get_current_users_in_room(room_id)
|
||||
users = await self.store.get_users_in_room(room_id)
|
||||
extra_servers = {get_domain_from_id(u) for u in users}
|
||||
servers = set(extra_servers) | set(servers)
|
||||
|
||||
@@ -275,7 +282,7 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
return {"room_id": room_id, "servers": servers}
|
||||
|
||||
async def on_directory_query(self, args):
|
||||
async def on_directory_query(self, args: JsonDict) -> JsonDict:
|
||||
room_alias = RoomAlias.from_string(args["room_alias"])
|
||||
if not self.hs.is_mine(room_alias):
|
||||
raise SynapseError(400, "Room Alias is not hosted on this homeserver")
|
||||
@@ -293,7 +300,7 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
async def _update_canonical_alias(
|
||||
self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias
|
||||
):
|
||||
) -> None:
|
||||
"""
|
||||
Send an updated canonical alias event if the removed alias was set as
|
||||
the canonical alias or listed in the alt_aliases field.
|
||||
@@ -344,7 +351,9 @@ class DirectoryHandler(BaseHandler):
|
||||
ratelimit=False,
|
||||
)
|
||||
|
||||
async def get_association_from_room_alias(self, room_alias: RoomAlias):
|
||||
async def get_association_from_room_alias(
|
||||
self, room_alias: RoomAlias
|
||||
) -> Optional[RoomAliasMapping]:
|
||||
result = await self.store.get_association_from_room_alias(room_alias)
|
||||
if not result:
|
||||
# Query AS to see if it exists
|
||||
@@ -372,7 +381,7 @@ class DirectoryHandler(BaseHandler):
|
||||
# either no interested services, or no service with an exclusive lock
|
||||
return True
|
||||
|
||||
async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str):
|
||||
async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool:
|
||||
"""Determine whether a user can delete an alias.
|
||||
|
||||
One of the following must be true:
|
||||
@@ -394,14 +403,13 @@ class DirectoryHandler(BaseHandler):
|
||||
if not room_id:
|
||||
return False
|
||||
|
||||
res = await self.auth.check_can_change_room_list(
|
||||
return await self.auth.check_can_change_room_list(
|
||||
room_id, UserID.from_string(user_id)
|
||||
)
|
||||
return res
|
||||
|
||||
async def edit_published_room_list(
|
||||
self, requester: Requester, room_id: str, visibility: str
|
||||
):
|
||||
) -> None:
|
||||
"""Edit the entry of the room in the published room list.
|
||||
|
||||
requester
|
||||
@@ -469,7 +477,7 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
async def edit_published_appservice_room_list(
|
||||
self, appservice_id: str, network_id: str, room_id: str, visibility: str
|
||||
):
|
||||
) -> None:
|
||||
"""Add or remove a room from the appservice/network specific public
|
||||
room list.
|
||||
|
||||
@@ -499,5 +507,4 @@ class DirectoryHandler(BaseHandler):
|
||||
room_id, requester.user.to_string()
|
||||
)
|
||||
|
||||
aliases = await self.store.get_aliases_for_room(room_id)
|
||||
return aliases
|
||||
return await self.store.get_aliases_for_room(room_id)
|
||||
|
||||
86
synapse/handlers/event_auth.py
Normal file
@@ -0,0 +1,86 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from synapse.api.constants import EventTypes, JoinRules
from synapse.api.room_versions import RoomVersion
from synapse.types import StateMap

if TYPE_CHECKING:
    from synapse.server import HomeServer


class EventAuthHandler:
    """
    This class contains methods for authenticating events added to room graphs.
    """

    def __init__(self, hs: "HomeServer"):
        self._store = hs.get_datastore()

    async def can_join_without_invite(
        self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
    ) -> bool:
        """
        Check whether a user can join a room without an invite.

        When joining a room with restricted joined rules (as defined in MSC3083),
        the membership of spaces must be checked during join.

        Args:
            state_ids: The state of the room as it currently is.
            room_version: The room version of the room being joined.
            user_id: The user joining the room.

        Returns:
            True if the user can join the room, false otherwise.
        """
        # This only applies to room versions which support the new join rule.
        if not room_version.msc3083_join_rules:
            return True

        # If there's no join rule, then it defaults to invite (so this doesn't apply).
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
        if not join_rules_event_id:
            return True

        # If the join rule is not restricted, this doesn't apply.
        join_rules_event = await self._store.get_event(join_rules_event_id)
        if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
            return True

        # If allowed is of the wrong form, then only allow invited users.
        allowed_spaces = join_rules_event.content.get("allow", [])
        if not isinstance(allowed_spaces, list):
            return False

        # Get the list of joined rooms and see if there's an overlap.
        joined_rooms = await self._store.get_rooms_for_user(user_id)

        # Pull out the other room IDs, invalid data gets filtered.
        for space in allowed_spaces:
            if not isinstance(space, dict):
                continue

            space_id = space.get("space")
            if not isinstance(space_id, str):
                continue

            # The user was joined to one of the spaces specified, they can join
            # this room!
            if space_id in joined_rooms:
                return True

        # The user was not in any of the required spaces.
        return False
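
A hypothetical `m.room.join_rules` content that would pass the check above for a member of `!space:example.org`; the field names mirror the `content.get()` calls in `can_join_without_invite`, and the `via` key is shown only for context (it is not inspected here):

join_rules_content = {
    "join_rule": "restricted",  # i.e. JoinRules.MSC3083_RESTRICTED
    "allow": [
        {"space": "!space:example.org", "via": ["example.org"]},
    ],
}
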
@@ -103,7 +103,7 @@ class EventStreamHandler(BaseHandler):
# Send down presence.
if event.state_key == auth_user_id:
# Send down presence for everyone in the room.
users = await self.state.get_current_users_in_room(
users = await self.store.get_users_in_room(
event.room_id
) # type: Iterable[str]
else:

@@ -146,6 +146,7 @@ class FederationHandler(BaseHandler):
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.spam_checker = hs.get_spam_checker()
|
||||
self.event_creation_handler = hs.get_event_creation_handler()
|
||||
self._event_auth_handler = hs.get_event_auth_handler()
|
||||
self._message_handler = hs.get_message_handler()
|
||||
self._server_notices_mxid = hs.config.server_notices_mxid
|
||||
self.config = hs.config
|
||||
@@ -551,8 +552,12 @@ class FederationHandler(BaseHandler):
|
||||
destination: str,
|
||||
room_id: str,
|
||||
event_id: str,
|
||||
) -> Tuple[List[EventBase], List[EventBase]]:
|
||||
"""Requests all of the room state at a given event from a remote homeserver.
|
||||
) -> List[EventBase]:
|
||||
"""Requests all of the room state at a given event from a remote
|
||||
homeserver.
|
||||
|
||||
Will also fetch any missing events reported in the `auth_chain_ids`
|
||||
section of `/state_ids`.
|
||||
|
||||
Args:
|
||||
destination: The remote homeserver to query for the state.
|
||||
@@ -560,8 +565,7 @@ class FederationHandler(BaseHandler):
|
||||
event_id: The id of the event we want the state at.
|
||||
|
||||
Returns:
|
||||
A list of events in the state, not including the event itself, and
|
||||
a list of events in the auth chain for the given event.
|
||||
A list of events in the state, not including the event itself.
|
||||
"""
|
||||
(
|
||||
state_event_ids,
|
||||
@@ -570,68 +574,53 @@ class FederationHandler(BaseHandler):
|
||||
destination, room_id, event_id=event_id
|
||||
)
|
||||
|
||||
desired_events = set(state_event_ids + auth_event_ids)
|
||||
|
||||
event_map = await self._get_events_from_store_or_dest(
|
||||
destination, room_id, desired_events
|
||||
)
|
||||
|
||||
failed_to_fetch = desired_events - event_map.keys()
|
||||
if failed_to_fetch:
|
||||
logger.warning(
|
||||
"Failed to fetch missing state/auth events for %s %s",
|
||||
event_id,
|
||||
failed_to_fetch,
|
||||
)
|
||||
|
||||
remote_state = [
|
||||
event_map[e_id] for e_id in state_event_ids if e_id in event_map
|
||||
]
|
||||
|
||||
auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
|
||||
auth_chain.sort(key=lambda e: e.depth)
|
||||
|
||||
return remote_state, auth_chain
|
||||
|
||||
async def _get_events_from_store_or_dest(
|
||||
self, destination: str, room_id: str, event_ids: Iterable[str]
|
||||
) -> Dict[str, EventBase]:
|
||||
"""Fetch events from a remote destination, checking if we already have them.
|
||||
|
||||
Persists any events we don't already have as outliers.
|
||||
|
||||
If we fail to fetch any of the events, a warning will be logged, and the event
|
||||
will be omitted from the result. Likewise, any events which turn out not to
|
||||
be in the given room.
|
||||
|
||||
This function *does not* automatically get missing auth events of the
|
||||
newly fetched events. Callers must include the full auth chain of
|
||||
the missing events in the `event_ids` argument, to ensure that any
|
||||
missing auth events are correctly fetched.
|
||||
|
||||
Returns:
|
||||
map from event_id to event
|
||||
"""
|
||||
fetched_events = await self.store.get_events(event_ids, allow_rejected=True)
|
||||
|
||||
missing_events = set(event_ids) - fetched_events.keys()
|
||||
|
||||
if missing_events:
|
||||
logger.debug(
|
||||
"Fetching unknown state/auth events %s for room %s",
|
||||
missing_events,
|
||||
room_id,
|
||||
)
|
||||
# Fetch the state events from the DB, and check we have the auth events.
|
||||
event_map = await self.store.get_events(state_event_ids, allow_rejected=True)
|
||||
auth_events_in_store = await self.store.have_seen_events(auth_event_ids)
|
||||
|
||||
# Check for missing events. We handle state and auth events separately,
|
||||
# as we want to pull the state from the DB, but we don't for the auth
|
||||
# events. (Note: we likely won't use the majority of the auth chain, and
|
||||
# it can be *huge* for large rooms, so it's worth ensuring that we don't
|
||||
# unnecessarily pull it from the DB).
|
||||
missing_state_events = set(state_event_ids) - set(event_map)
|
||||
missing_auth_events = set(auth_event_ids) - set(auth_events_in_store)
|
||||
if missing_state_events or missing_auth_events:
|
||||
await self._get_events_and_persist(
|
||||
destination=destination, room_id=room_id, events=missing_events
|
||||
destination=destination,
|
||||
room_id=room_id,
|
||||
events=missing_state_events | missing_auth_events,
|
||||
)
|
||||
|
||||
# we need to make sure we re-load from the database to get the rejected
|
||||
# state correct.
|
||||
fetched_events.update(
|
||||
(await self.store.get_events(missing_events, allow_rejected=True))
|
||||
)
|
||||
if missing_state_events:
|
||||
new_events = await self.store.get_events(
|
||||
missing_state_events, allow_rejected=True
|
||||
)
|
||||
event_map.update(new_events)
|
||||
|
||||
missing_state_events.difference_update(new_events)
|
||||
|
||||
if missing_state_events:
|
||||
logger.warning(
|
||||
"Failed to fetch missing state events for %s %s",
|
||||
event_id,
|
||||
missing_state_events,
|
||||
)
|
||||
|
||||
if missing_auth_events:
|
||||
auth_events_in_store = await self.store.have_seen_events(
|
||||
missing_auth_events
|
||||
)
|
||||
missing_auth_events.difference_update(auth_events_in_store)
|
||||
|
||||
if missing_auth_events:
|
||||
logger.warning(
|
||||
"Failed to fetch missing auth events for %s %s",
|
||||
event_id,
|
||||
missing_auth_events,
|
||||
)
|
||||
|
||||
remote_state = list(event_map.values())
|
||||
|
||||
# check for events which were in the wrong room.
|
||||
#
|
||||
@@ -639,8 +628,8 @@ class FederationHandler(BaseHandler):
|
||||
# auth_events at an event in room A are actually events in room B
|
||||
|
||||
bad_events = [
|
||||
(event_id, event.room_id)
|
||||
for event_id, event in fetched_events.items()
|
||||
(event.event_id, event.room_id)
|
||||
for event in remote_state
|
||||
if event.room_id != room_id
|
||||
]
|
||||
|
||||
@@ -657,9 +646,10 @@ class FederationHandler(BaseHandler):
|
||||
room_id,
|
||||
)
|
||||
|
||||
del fetched_events[bad_event_id]
|
||||
if bad_events:
|
||||
remote_state = [e for e in remote_state if e.room_id == room_id]
|
||||
|
||||
return fetched_events
|
||||
return remote_state
|
||||
|
||||
async def _get_state_after_missing_prev_event(
|
||||
self,
|
||||
@@ -962,27 +952,23 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
# For each edge get the current state.
|
||||
|
||||
auth_events = {}
|
||||
state_events = {}
|
||||
events_to_state = {}
|
||||
for e_id in edges:
|
||||
state, auth = await self._get_state_for_room(
|
||||
state = await self._get_state_for_room(
|
||||
destination=dest,
|
||||
room_id=room_id,
|
||||
event_id=e_id,
|
||||
)
|
||||
auth_events.update({a.event_id: a for a in auth})
|
||||
auth_events.update({s.event_id: s for s in state})
|
||||
state_events.update({s.event_id: s for s in state})
|
||||
events_to_state[e_id] = state
|
||||
|
||||
required_auth = {
|
||||
a_id
|
||||
for event in events
|
||||
+ list(state_events.values())
|
||||
+ list(auth_events.values())
|
||||
for event in events + list(state_events.values())
|
||||
for a_id in event.auth_event_ids()
|
||||
}
|
||||
auth_events = await self.store.get_events(required_auth, allow_rejected=True)
|
||||
auth_events.update(
|
||||
{e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
|
||||
)
|
||||
@@ -1673,8 +1659,40 @@ class FederationHandler(BaseHandler):
|
||||
# would introduce the danger of backwards-compatibility problems.
|
||||
event.internal_metadata.send_on_behalf_of = origin
|
||||
|
||||
# Calculate the event context.
|
||||
context = await self.state_handler.compute_event_context(event)
|
||||
context = await self._auth_and_persist_event(origin, event, context)
|
||||
|
||||
# Get the state before the new event.
|
||||
prev_state_ids = await context.get_prev_state_ids()
|
||||
|
||||
# Check if the user is already in the room or invited to the room.
|
||||
user_id = event.state_key
|
||||
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
|
||||
newly_joined = True
|
||||
user_is_invited = False
|
||||
if prev_member_event_id:
|
||||
prev_member_event = await self.store.get_event(prev_member_event_id)
|
||||
newly_joined = prev_member_event.membership != Membership.JOIN
|
||||
user_is_invited = prev_member_event.membership == Membership.INVITE
|
||||
|
||||
# If the member is not already in the room, and not invited, check if
|
||||
# they should be allowed access via membership in a space.
|
||||
if (
|
||||
newly_joined
|
||||
and not user_is_invited
|
||||
and not await self._event_auth_handler.can_join_without_invite(
|
||||
prev_state_ids,
|
||||
event.room_version,
|
||||
user_id,
|
||||
)
|
||||
):
|
||||
raise AuthError(
|
||||
403,
|
||||
"You do not belong to any of the required spaces to join this room.",
|
||||
)
|
||||
|
||||
# Persist the event.
|
||||
await self._auth_and_persist_event(origin, event, context)
|
||||
|
||||
logger.debug(
|
||||
"on_send_join_request: After _auth_and_persist_event: %s, sigs: %s",
|
||||
@@ -1682,8 +1700,6 @@ class FederationHandler(BaseHandler):
|
||||
event.signatures,
|
||||
)
|
||||
|
||||
prev_state_ids = await context.get_prev_state_ids()
|
||||
|
||||
state_ids = list(prev_state_ids.values())
|
||||
auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)
|
||||
|
||||
@@ -2006,7 +2022,7 @@ class FederationHandler(BaseHandler):
|
||||
state: Optional[Iterable[EventBase]] = None,
|
||||
auth_events: Optional[MutableStateMap[EventBase]] = None,
|
||||
backfilled: bool = False,
|
||||
) -> EventContext:
|
||||
) -> None:
|
||||
"""
|
||||
Process an event by performing auth checks and then persisting to the database.
|
||||
|
||||
@@ -2028,9 +2044,6 @@ class FederationHandler(BaseHandler):
|
||||
event is an outlier), may be the auth events claimed by the remote
|
||||
server.
|
||||
backfilled: True if the event was backfilled.
|
||||
|
||||
Returns:
|
||||
The event context.
|
||||
"""
|
||||
context = await self._check_event_auth(
|
||||
origin,
|
||||
@@ -2060,8 +2073,6 @@ class FederationHandler(BaseHandler):
|
||||
)
|
||||
raise
|
||||
|
||||
return context
|
||||
|
||||
async def _auth_and_persist_events(
|
||||
self,
|
||||
origin: str,
|
||||
@@ -2420,7 +2431,9 @@ class FederationHandler(BaseHandler):
|
||||
# If we are going to send this event over federation we precalculate
|
||||
# the joined hosts.
|
||||
if event.internal_metadata.get_send_on_behalf_of():
|
||||
await self.event_creation_handler.cache_joined_hosts_for_event(event)
|
||||
await self.event_creation_handler.cache_joined_hosts_for_event(
|
||||
event, context
|
||||
)
|
||||
|
||||
return context
|
||||
|
||||
@@ -2956,7 +2969,7 @@ class FederationHandler(BaseHandler):
|
||||
try:
|
||||
# for each sig on the third_party_invite block of the actual invite
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
for key_name in signature_block.keys():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
continue
|
||||
|
||||
|
||||
@@ -15,10 +15,9 @@
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for interacting with Identity Servers"""
|
||||
|
||||
import logging
|
||||
import urllib.parse
|
||||
from typing import Awaitable, Callable, Dict, List, Optional, Tuple
|
||||
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple
|
||||
|
||||
from synapse.api.errors import (
|
||||
CodeMessageException,
|
||||
@@ -34,17 +33,24 @@ from synapse.http.site import SynapseRequest
|
||||
from synapse.types import JsonDict, Requester
|
||||
from synapse.util import json_decoder
|
||||
from synapse.util.hash import sha256_and_url_safe_base64
|
||||
from synapse.util.stringutils import assert_valid_client_secret, random_string
|
||||
from synapse.util.stringutils import (
|
||||
assert_valid_client_secret,
|
||||
random_string,
|
||||
valid_id_server_location,
|
||||
)
|
||||
|
||||
from ._base import BaseHandler
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
id_server_scheme = "https://"
|
||||
|
||||
|
||||
class IdentityHandler(BaseHandler):
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
# An HTTP client for contacting trusted URLs.
|
||||
@@ -77,7 +83,7 @@ class IdentityHandler(BaseHandler):
|
||||
request: SynapseRequest,
|
||||
medium: str,
|
||||
address: str,
|
||||
):
|
||||
) -> None:
|
||||
"""Used to ratelimit requests to `/requestToken` by IP and address.
|
||||
|
||||
Args:
|
||||
@@ -172,6 +178,11 @@ class IdentityHandler(BaseHandler):
|
||||
server with, if necessary. Required if use_v2 is true
|
||||
use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True
|
||||
|
||||
Raises:
|
||||
SynapseError: On any of the following conditions
|
||||
- the supplied id_server is not a valid identity server name
|
||||
- we failed to contact the supplied identity server
|
||||
|
||||
Returns:
|
||||
The response from the identity server
|
||||
"""
|
||||
@@ -181,6 +192,12 @@ class IdentityHandler(BaseHandler):
|
||||
if id_access_token is None:
|
||||
use_v2 = False
|
||||
|
||||
if not valid_id_server_location(id_server):
|
||||
raise SynapseError(
|
||||
400,
|
||||
"id_server must be a valid hostname with optional port and path components",
|
||||
)
|
||||
|
||||
# Decide which API endpoint URLs to use
|
||||
headers = {}
|
||||
bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
|
||||
@@ -269,12 +286,21 @@ class IdentityHandler(BaseHandler):
|
||||
id_server: Identity server to unbind from
|
||||
|
||||
Raises:
|
||||
SynapseError: If we failed to contact the identity server
|
||||
SynapseError: On any of the following conditions
|
||||
- the supplied id_server is not a valid identity server name
|
||||
- we failed to contact the supplied identity server
|
||||
|
||||
Returns:
|
||||
True on success, otherwise False if the identity
|
||||
server doesn't support unbinding
|
||||
"""
|
||||
|
||||
if not valid_id_server_location(id_server):
|
||||
raise SynapseError(
|
||||
400,
|
||||
"id_server must be a valid hostname with optional port and path components",
|
||||
)
|
||||
|
||||
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
|
||||
url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
|
||||
|
||||
|
||||
@@ -15,10 +15,11 @@
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import random
|
||||
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
|
||||
from twisted.internet import defer
|
||||
from twisted.internet.interfaces import IDelayedCall
|
||||
|
||||
from synapse import event_auth
|
||||
@@ -43,14 +44,15 @@ from synapse.events import EventBase
|
||||
from synapse.events.builder import EventBuilder
|
||||
from synapse.events.snapshot import EventContext
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.logging.context import run_in_background
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
||||
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester
|
||||
from synapse.util import json_decoder, json_encoder
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util import json_decoder, json_encoder, log_failure
|
||||
from synapse.util.async_helpers import Linearizer, unwrapFirstError
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.metrics import measure_func
|
||||
from synapse.visibility import filter_events_for_client
|
||||
|
||||
@@ -66,7 +68,7 @@ logger = logging.getLogger(__name__)
|
||||
class MessageHandler:
|
||||
"""Contains some read only APIs to get state about a room"""
|
||||
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.auth = hs.get_auth()
|
||||
self.clock = hs.get_clock()
|
||||
self.state = hs.get_state_handler()
|
||||
@@ -91,7 +93,7 @@ class MessageHandler:
|
||||
room_id: str,
|
||||
event_type: str,
|
||||
state_key: str,
|
||||
) -> dict:
|
||||
) -> Optional[EventBase]:
|
||||
"""Get data from a room.
|
||||
|
||||
Args:
|
||||
@@ -115,6 +117,10 @@ class MessageHandler:
|
||||
data = await self.state.get_current_state(room_id, event_type, state_key)
|
||||
elif membership == Membership.LEAVE:
|
||||
key = (event_type, state_key)
|
||||
# If the membership is not JOIN, then the event ID should exist.
|
||||
assert (
|
||||
membership_event_id is not None
|
||||
), "check_user_in_room_or_world_readable returned invalid data"
|
||||
room_state = await self.state_store.get_state_for_events(
|
||||
[membership_event_id], StateFilter.from_types([key])
|
||||
)
|
||||
@@ -186,10 +192,12 @@ class MessageHandler:
|
||||
|
||||
event = last_events[0]
|
||||
if visible_events:
|
||||
room_state = await self.state_store.get_state_for_events(
|
||||
room_state_events = await self.state_store.get_state_for_events(
|
||||
[event.event_id], state_filter=state_filter
|
||||
)
|
||||
room_state = room_state[event.event_id]
|
||||
room_state = room_state_events[
|
||||
event.event_id
|
||||
] # type: Mapping[Any, EventBase]
|
||||
else:
|
||||
raise AuthError(
|
||||
403,
|
||||
@@ -210,10 +218,14 @@ class MessageHandler:
|
||||
)
|
||||
room_state = await self.store.get_events(state_ids.values())
|
||||
elif membership == Membership.LEAVE:
|
||||
room_state = await self.state_store.get_state_for_events(
|
||||
# If the membership is not JOIN, then the event ID should exist.
|
||||
assert (
|
||||
membership_event_id is not None
|
||||
), "check_user_in_room_or_world_readable returned invalid data"
|
||||
room_state_events = await self.state_store.get_state_for_events(
|
||||
[membership_event_id], state_filter=state_filter
|
||||
)
|
||||
room_state = room_state[membership_event_id]
|
||||
room_state = room_state_events[membership_event_id]
|
||||
|
||||
now = self.clock.time_msec()
|
||||
events = await self._event_serializer.serialize_events(
|
||||
@@ -248,7 +260,7 @@ class MessageHandler:
|
||||
"Getting joined members after leaving is not implemented"
|
||||
)
|
||||
|
||||
users_with_profile = await self.state.get_current_users_in_room(room_id)
|
||||
users_with_profile = await self.store.get_users_in_room_with_profiles(room_id)
|
||||
|
||||
# If this is an AS, double check that they are allowed to see the members.
|
||||
# This can either be because the AS user is in the room or because there
|
||||
@@ -447,6 +459,19 @@ class EventCreationHandler:
|
||||
|
||||
self._external_cache = hs.get_external_cache()
|
||||
|
||||
# Stores the state groups we've recently added to the joined hosts
|
||||
# external cache. Note that the timeout must be significantly less than
|
||||
# the TTL on the external cache.
|
||||
self._external_cache_joined_hosts_updates = (
|
||||
None
|
||||
) # type: Optional[ExpiringCache]
|
||||
if self._external_cache.is_enabled():
|
||||
self._external_cache_joined_hosts_updates = ExpiringCache(
|
||||
"_external_cache_joined_hosts_updates",
|
||||
self.clock,
|
||||
expiry_ms=30 * 60 * 1000,
|
||||
)
|
||||
|
||||
async def create_event(
|
||||
self,
|
||||
requester: Requester,
|
||||
@@ -955,9 +980,43 @@ class EventCreationHandler:
|
||||
logger.exception("Failed to encode content: %r", event.content)
|
||||
raise
|
||||
|
||||
await self.action_generator.handle_push_actions_for_event(event, context)
|
||||
# We now persist the event (and update the cache in parallel, since we
|
||||
# don't want to block on it).
|
||||
result = await make_deferred_yieldable(
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(
|
||||
self._persist_event,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
),
|
||||
run_in_background(
|
||||
self.cache_joined_hosts_for_event, event, context
|
||||
).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
|
||||
],
|
||||
consumeErrors=True,
|
||||
)
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
await self.cache_joined_hosts_for_event(event)
|
||||
return result[0]
|
||||
|
||||
async def _persist_event(
|
||||
self,
|
||||
requester: Requester,
|
||||
event: EventBase,
|
||||
context: EventContext,
|
||||
ratelimit: bool = True,
|
||||
extra_users: Optional[List[UserID]] = None,
|
||||
) -> EventBase:
|
||||
"""Actually persists the event. Should only be called by
|
||||
`handle_new_client_event`, and see its docstring for documentation of
|
||||
the arguments.
|
||||
"""
|
||||
|
||||
await self.action_generator.handle_push_actions_for_event(event, context)
|
||||
|
||||
try:
|
||||
# If we're a worker we need to hit out to the master.
|
||||
@@ -998,7 +1057,9 @@ class EventCreationHandler:
|
||||
await self.store.remove_push_actions_from_staging(event.event_id)
|
||||
raise
|
||||
|
||||
async def cache_joined_hosts_for_event(self, event: EventBase) -> None:
|
||||
async def cache_joined_hosts_for_event(
|
||||
self, event: EventBase, context: EventContext
|
||||
) -> None:
|
||||
"""Precalculate the joined hosts at the event, when using Redis, so that
|
||||
external federation senders don't have to recalculate it themselves.
|
||||
"""
|
||||
@@ -1006,6 +1067,9 @@ class EventCreationHandler:
|
||||
if not self._external_cache.is_enabled():
|
||||
return
|
||||
|
||||
# If external cache is enabled we should always have this.
|
||||
assert self._external_cache_joined_hosts_updates is not None
|
||||
|
||||
# We actually store two mappings, event ID -> prev state group,
|
||||
# state group -> joined hosts, which is much more space efficient
|
||||
# than event ID -> joined hosts.
|
||||
@@ -1013,22 +1077,28 @@ class EventCreationHandler:
|
||||
# Note: We have to cache event ID -> prev state group, as we don't
|
||||
# store that in the DB.
|
||||
#
|
||||
# Note: We always set the state group -> joined hosts cache, even if
|
||||
# we already set it, so that the expiry time is reset.
|
||||
# Note: We set the state group -> joined hosts cache if it hasn't been
|
||||
# set for a while, so that the expiry time is reset.
|
||||
|
||||
state_entry = await self.state.resolve_state_groups_for_events(
|
||||
event.room_id, event_ids=event.prev_event_ids()
|
||||
)
|
||||
|
||||
if state_entry.state_group:
|
||||
joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry)
|
||||
|
||||
await self._external_cache.set(
|
||||
"event_to_prev_state_group",
|
||||
event.event_id,
|
||||
state_entry.state_group,
|
||||
expiry_ms=60 * 60 * 1000,
|
||||
)
|
||||
|
||||
if state_entry.state_group in self._external_cache_joined_hosts_updates:
|
||||
return
|
||||
|
||||
joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry)
|
||||
|
||||
# Note that the expiry times must be larger than the expiry time in
|
||||
# _external_cache_joined_hosts_updates.
|
||||
await self._external_cache.set(
|
||||
"get_joined_hosts",
|
||||
str(state_entry.state_group),
|
||||
@@ -1036,6 +1106,8 @@ class EventCreationHandler:
|
||||
expiry_ms=60 * 60 * 1000,
|
||||
)
|
||||
|
||||
self._external_cache_joined_hosts_updates[state_entry.state_group] = None
|
||||
|
||||
async def _validate_canonical_alias(
|
||||
self, directory_handler, room_alias_str: str, expected_room_id: str
|
||||
) -> None:
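
On the reading side, a federation sender can resolve the joined hosts for an event with two external-cache lookups, mirroring the two `set()` calls above. A sketch under the assumption that the external cache exposes a matching async `get(cache_name, key)`:

async def joined_hosts_from_external_cache(external_cache, event_id):
    state_group = await external_cache.get("event_to_prev_state_group", event_id)
    if state_group is None:
        return None  # cache miss: fall back to computing from the database
    return await external_cache.get("get_joined_hosts", str(state_group))
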
@@ -15,7 +15,7 @@
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Dict, Generic, List, Optional, TypeVar, Union
|
||||
from urllib.parse import urlencode
|
||||
from urllib.parse import urlencode, urlparse
|
||||
|
||||
import attr
|
||||
import pymacaroons
|
||||
@@ -37,10 +37,7 @@ from twisted.web.client import readBody
|
||||
from twisted.web.http_headers import Headers
|
||||
|
||||
from synapse.config import ConfigError
|
||||
from synapse.config.oidc_config import (
|
||||
OidcProviderClientSecretJwtKey,
|
||||
OidcProviderConfig,
|
||||
)
|
||||
from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig
|
||||
from synapse.handlers.sso import MappingException, UserAttributes
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import make_deferred_yieldable
|
||||
@@ -71,8 +68,8 @@ logger = logging.getLogger(__name__)
|
||||
#
|
||||
# Here we have the names of the cookies, and the options we use to set them.
|
||||
_SESSION_COOKIES = [
|
||||
(b"oidc_session", b"Path=/_synapse/client/oidc; HttpOnly; Secure; SameSite=None"),
|
||||
(b"oidc_session_no_samesite", b"Path=/_synapse/client/oidc; HttpOnly"),
|
||||
(b"oidc_session", b"HttpOnly; Secure; SameSite=None"),
|
||||
(b"oidc_session_no_samesite", b"HttpOnly"),
|
||||
]
|
||||
|
||||
#: A token exchanged from the token endpoint, as per RFC6749 sec 5.1. and
|
||||
@@ -282,6 +279,13 @@ class OidcProvider:
|
||||
self._config = provider
|
||||
self._callback_url = hs.config.oidc_callback_url # type: str
|
||||
|
||||
# Calculate the prefix for OIDC callback paths based on the public_baseurl.
|
||||
# We'll insert this into the Path= parameter of any session cookies we set.
|
||||
public_baseurl_path = urlparse(hs.config.server.public_baseurl).path
|
||||
self._callback_path_prefix = (
|
||||
public_baseurl_path.encode("utf-8") + b"_synapse/client/oidc"
|
||||
)
|
||||
|
||||
self._oidc_attribute_requirements = provider.attribute_requirements
|
||||
self._scopes = provider.scopes
|
||||
self._user_profile_method = provider.user_profile_method
|
||||
@@ -782,8 +786,13 @@ class OidcProvider:
|
||||
|
||||
for cookie_name, options in _SESSION_COOKIES:
|
||||
request.cookies.append(
|
||||
b"%s=%s; Max-Age=3600; %s"
|
||||
% (cookie_name, cookie.encode("utf-8"), options)
|
||||
b"%s=%s; Max-Age=3600; Path=%s; %s"
|
||||
% (
|
||||
cookie_name,
|
||||
cookie.encode("utf-8"),
|
||||
self._callback_path_prefix,
|
||||
options,
|
||||
)
|
||||
)
|
||||
|
||||
metadata = await self.load_metadata()
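
With a default `public_baseurl` (no path component), the prefix computed in `__init__` is `/_synapse/client/oidc`, so the loop above now emits cookies equivalent to the following (illustrative values):

request_cookies = [
    b"oidc_session=<macaroon>; Max-Age=3600; Path=/_synapse/client/oidc; "
    b"HttpOnly; Secure; SameSite=None",
    b"oidc_session_no_samesite=<macaroon>; Max-Age=3600; "
    b"Path=/_synapse/client/oidc; HttpOnly",
]
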
@@ -960,6 +969,11 @@ class OidcProvider:
|
||||
# and attempt to match it.
|
||||
attributes = await oidc_response_to_user_attributes(failures=0)
|
||||
|
||||
if attributes.localpart is None:
|
||||
# If no localpart is returned then we will generate one, so
|
||||
# there is no need to search for existing users.
|
||||
return None
|
||||
|
||||
user_id = UserID(attributes.localpart, self._server_name).to_string()
|
||||
users = await self._store.get_users_by_id_case_insensitive(user_id)
|
||||
if users:
|
||||
@@ -28,6 +28,8 @@ from bisect import bisect
|
||||
from contextlib import contextmanager
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Callable,
|
||||
Collection,
|
||||
Dict,
|
||||
FrozenSet,
|
||||
Iterable,
|
||||
@@ -57,9 +59,8 @@ from synapse.replication.http.presence import (
|
||||
from synapse.replication.http.streams import ReplicationGetStreamUpdates
|
||||
from synapse.replication.tcp.commands import ClearUserSyncsCommand
|
||||
from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
|
||||
from synapse.state import StateHandler
|
||||
from synapse.storage.databases.main import DataStore
|
||||
from synapse.types import Collection, JsonDict, UserID, get_domain_from_id
|
||||
from synapse.types import JsonDict, UserID, get_domain_from_id
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.caches.descriptors import _CacheContext, cached
|
||||
from synapse.util.metrics import Measure
|
||||
@@ -121,13 +122,21 @@ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
|
||||
|
||||
|
||||
class BasePresenceHandler(abc.ABC):
|
||||
"""Parts of the PresenceHandler that are shared between workers and master"""
|
||||
"""Parts of the PresenceHandler that are shared between workers and presence
|
||||
writer"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastore()
|
||||
self.presence_router = hs.get_presence_router()
|
||||
self.state = hs.get_state_handler()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
|
||||
self.federation_queue = PresenceFederationQueue(hs, self)
|
||||
self._federation = None
|
||||
if hs.should_send_federation():
|
||||
self._federation = hs.get_federation_sender()
|
||||
|
||||
self._federation_queue = PresenceFederationQueue(hs, self)
|
||||
|
||||
self._busy_presence_enabled = hs.config.experimental.msc3026_enabled
|
||||
|
||||
@@ -224,23 +233,23 @@ class BasePresenceHandler(abc.ABC):
|
||||
"""
|
||||
|
||||
async def update_external_syncs_row(
|
||||
self, process_id, user_id, is_syncing, sync_time_msec
|
||||
):
|
||||
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
|
||||
) -> None:
|
||||
"""Update the syncing users for an external process as a delta.
|
||||
|
||||
This is a no-op when presence is handled by a different worker.
|
||||
|
||||
Args:
|
||||
process_id (str): An identifier for the process the users are
|
||||
process_id: An identifier for the process the users are
|
||||
syncing against. This allows synapse to process updates
|
||||
as user start and stop syncing against a given process.
|
||||
user_id (str): The user who has started or stopped syncing
|
||||
is_syncing (bool): Whether or not the user is now syncing
|
||||
sync_time_msec(int): Time in ms when the user was last syncing
|
||||
user_id: The user who has started or stopped syncing
|
||||
is_syncing: Whether or not the user is now syncing
|
||||
sync_time_msec: Time in ms when the user was last syncing
|
||||
"""
|
||||
pass
|
||||
|
||||
async def update_external_syncs_clear(self, process_id):
|
||||
async def update_external_syncs_clear(self, process_id: str) -> None:
|
||||
"""Marks all users that had been marked as syncing by a given process
|
||||
as offline.
|
||||
|
||||
@@ -253,14 +262,39 @@ class BasePresenceHandler(abc.ABC):
|
||||
async def process_replication_rows(
|
||||
self, stream_name: str, instance_name: str, token: int, rows: list
|
||||
):
|
||||
"""Process presence stream rows received over replication."""
|
||||
await self.federation_queue.process_replication_rows(
|
||||
"""Process streams received over replication."""
|
||||
await self._federation_queue.process_replication_rows(
|
||||
stream_name, instance_name, token, rows
|
||||
)
|
||||
|
||||
def get_federation_queue(self) -> "PresenceFederationQueue":
|
||||
"""Get the presence federation queue, if any."""
|
||||
return self.federation_queue
|
||||
"""Get the presence federation queue."""
|
||||
return self._federation_queue
|
||||
|
||||
async def maybe_send_presence_to_interested_destinations(
|
||||
self, states: List[UserPresenceState]
|
||||
):
|
||||
"""If this instance is a federation sender, send the states to all
|
||||
destinations that are interested. Filters out any states for remote
|
||||
users.
|
||||
"""
|
||||
|
||||
if not self._federation:
|
||||
return
|
||||
|
||||
states = [s for s in states if self.is_mine_id(s.user_id)]
|
||||
|
||||
if not states:
|
||||
return
|
||||
|
||||
hosts_and_states = await get_interested_remotes(
|
||||
self.store,
|
||||
self.presence_router,
|
||||
states,
|
||||
)
|
||||
|
||||
for destinations, states in hosts_and_states:
|
||||
self._federation.send_presence_to_destinations(states, destinations)
|
||||
|
||||
|
||||
class _NullContextManager(ContextManager[None]):
|
||||
@@ -271,14 +305,19 @@ class _NullContextManager(ContextManager[None]):
|
||||
|
||||
|
||||
class WorkerPresenceHandler(BasePresenceHandler):
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
self.hs = hs
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
|
||||
self.presence_router = hs.get_presence_router()
|
||||
self._presence_writer_instance = hs.config.worker.writers.presence[0]
|
||||
|
||||
self._presence_enabled = hs.config.use_presence
|
||||
self.state = hs.get_state_handler()
|
||||
|
||||
# Route presence EDUs to the right worker
|
||||
hs.get_federation_registry().register_instances_for_edu(
|
||||
"m.presence",
|
||||
hs.config.worker.writers.presence,
|
||||
)
|
||||
|
||||
# The number of ongoing syncs on this process, by user id.
|
||||
# Empty if _presence_enabled is false.
|
||||
@@ -287,11 +326,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
self.notifier = hs.get_notifier()
|
||||
self.instance_id = hs.get_instance_id()
|
||||
|
||||
self._federation = hs.get_federation_sender()
|
||||
|
||||
# user_id -> last_sync_ms. Lists the users that have stopped syncing
|
||||
# but we haven't notified the master of that yet
|
||||
self.users_going_offline = {}
|
||||
# user_id -> last_sync_ms. Lists the users that have stopped syncing but
|
||||
# we haven't notified the presence writer of that yet
|
||||
self.users_going_offline = {} # type: Dict[str, int]
|
||||
|
||||
self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
|
||||
self._set_state_client = ReplicationPresenceSetState.make_client(hs)
|
||||
@@ -310,44 +347,39 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
self._on_shutdown,
|
||||
)
|
||||
|
||||
def _on_shutdown(self):
|
||||
def _on_shutdown(self) -> None:
|
||||
if self._presence_enabled:
|
||||
self.hs.get_tcp_replication().send_command(
|
||||
ClearUserSyncsCommand(self.instance_id)
|
||||
)
|
||||
|
||||
def send_user_sync(self, user_id, is_syncing, last_sync_ms):
|
||||
def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None:
|
||||
if self._presence_enabled:
|
||||
self.hs.get_tcp_replication().send_user_sync(
|
||||
self.instance_id, user_id, is_syncing, last_sync_ms
|
||||
)
|
||||
|
||||
def mark_as_coming_online(self, user_id):
|
||||
"""A user has started syncing. Send a UserSync to the master, unless they
|
||||
had recently stopped syncing.
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
def mark_as_coming_online(self, user_id: str) -> None:
|
||||
"""A user has started syncing. Send a UserSync to the presence writer,
|
||||
unless they had recently stopped syncing.
|
||||
"""
|
||||
going_offline = self.users_going_offline.pop(user_id, None)
|
||||
if not going_offline:
|
||||
# Safe to skip because we haven't yet told the master they were offline
|
||||
# Safe to skip because we haven't yet told the presence writer they
|
||||
# were offline
|
||||
self.send_user_sync(user_id, True, self.clock.time_msec())
|
||||
|
||||
def mark_as_going_offline(self, user_id):
|
||||
"""A user has stopped syncing. We wait before notifying the master as
|
||||
its likely they'll come back soon. This allows us to avoid sending
|
||||
a stopped syncing immediately followed by a started syncing notification
|
||||
to the master
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
def mark_as_going_offline(self, user_id: str) -> None:
|
||||
"""A user has stopped syncing. We wait before notifying the presence
|
||||
writer as it's likely they'll come back soon. This allows us to avoid
|
||||
sending a stopped syncing immediately followed by a started syncing
|
||||
notification to the presence writer
|
||||
"""
|
||||
self.users_going_offline[user_id] = self.clock.time_msec()
|
||||
|
||||
def send_stop_syncing(self):
|
||||
"""Check if there are any users who have stopped syncing a while ago
|
||||
and haven't come back yet. If there are poke the master about them.
|
||||
def send_stop_syncing(self) -> None:
|
||||
"""Check if there are any users who have stopped syncing a while ago and
|
||||
haven't come back yet. If there are poke the presence writer about them.
|
||||
"""
|
||||
now = self.clock.time_msec()
|
||||
for user_id, last_sync_ms in list(self.users_going_offline.items()):
|
||||
@@ -393,7 +425,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
|
||||
return _user_syncing()
|
||||
|
||||
async def notify_from_replication(self, states, stream_id):
|
||||
async def notify_from_replication(
|
||||
self, states: List[UserPresenceState], stream_id: int
|
||||
) -> None:
|
||||
parties = await get_interested_parties(self.store, self.presence_router, states)
|
||||
room_ids_to_states, users_to_states = parties
|
||||
|
||||
@@ -404,6 +438,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
users=users_to_states.keys(),
|
||||
)
|
||||
|
||||
# If this is a federation sender, notify about presence updates.
|
||||
await self.maybe_send_presence_to_interested_destinations(states)
|
||||
|
||||
async def process_replication_rows(
|
||||
self, stream_name: str, instance_name: str, token: int, rows: list
|
||||
):
|
||||
@@ -431,20 +468,6 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
stream_id = token
|
||||
await self.notify_from_replication(states, stream_id)
|
||||
|
||||
# Handle poking the local federation sender, if there is one.
|
||||
if not self._federation:
|
||||
return
|
||||
|
||||
hosts_and_states = await get_interested_remotes(
|
||||
self.store,
|
||||
self.presence_router,
|
||||
states,
|
||||
self.state,
|
||||
)
|
||||
|
||||
for destinations, states in hosts_and_states:
|
||||
self._federation.send_presence_to_destinations(states, destinations)
|
||||
|
||||
def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
|
||||
return [
|
||||
user_id
|
||||
@@ -452,7 +475,12 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
if count > 0
|
||||
]
|
||||
|
||||
async def set_state(self, target_user, state, ignore_status_msg=False):
|
||||
async def set_state(
|
||||
self,
|
||||
target_user: UserID,
|
||||
state: JsonDict,
|
||||
ignore_status_msg: bool = False,
|
||||
) -> None:
|
||||
"""Set the presence state of the user."""
|
||||
presence = state["presence"]
|
||||
|
||||
@@ -474,12 +502,15 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
if not self.hs.config.use_presence:
|
||||
return
|
||||
|
||||
# Proxy request to master
|
||||
# Proxy request to instance that writes presence
|
||||
await self._set_state_client(
|
||||
user_id=user_id, state=state, ignore_status_msg=ignore_status_msg
|
||||
instance_name=self._presence_writer_instance,
|
||||
user_id=user_id,
|
||||
state=state,
|
||||
ignore_status_msg=ignore_status_msg,
|
||||
)
|
||||
|
||||
async def bump_presence_active_time(self, user):
|
||||
async def bump_presence_active_time(self, user: UserID) -> None:
|
||||
"""We've seen the user do something that indicates they're interacting
|
||||
with the app.
|
||||
"""
|
||||
@@ -487,25 +518,22 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
if not self.hs.config.use_presence:
|
||||
return
|
||||
|
||||
# Proxy request to master
|
||||
# Proxy request to instance that writes presence
|
||||
user_id = user.to_string()
|
||||
await self._bump_active_client(user_id=user_id)
|
||||
await self._bump_active_client(
|
||||
instance_name=self._presence_writer_instance, user_id=user_id
|
||||
)
|
||||
|
||||
|
||||
class PresenceHandler(BasePresenceHandler):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
self.hs = hs
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.server_name = hs.hostname
|
||||
self.wheel_timer = WheelTimer()
|
||||
self.notifier = hs.get_notifier()
|
||||
self.state = hs.get_state_handler()
|
||||
self.presence_router = hs.get_presence_router()
|
||||
self._presence_enabled = hs.config.use_presence
|
||||
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
federation_registry.register_edu_handler("m.presence", self.incoming_presence)
|
||||
@@ -566,8 +594,8 @@ class PresenceHandler(BasePresenceHandler):
|
||||
# we assume that all the sync requests on that process have stopped.
|
||||
# Stored as a dict from process_id to set of user_id, and a dict of
|
||||
# process_id to millisecond timestamp last updated.
|
||||
self.external_process_to_current_syncs = {} # type: Dict[int, Set[str]]
|
||||
self.external_process_last_updated_ms = {} # type: Dict[int, int]
|
||||
self.external_process_to_current_syncs = {} # type: Dict[str, Set[str]]
|
||||
self.external_process_last_updated_ms = {} # type: Dict[str, int]
|
||||
|
||||
self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")
|
||||
|
||||
@@ -607,7 +635,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
self._event_pos = self.store.get_current_events_token()
|
||||
self._event_processing = False
|
||||
|
||||
async def _on_shutdown(self):
|
||||
async def _on_shutdown(self) -> None:
|
||||
"""Gets called when shutting down. This lets us persist any updates that
|
||||
we haven't yet persisted, e.g. updates that only changes some internal
|
||||
timers. This allows changes to persist across startup without having to
|
||||
@@ -636,7 +664,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
)
|
||||
logger.info("Finished _on_shutdown")
|
||||
|
||||
async def _persist_unpersisted_changes(self):
|
||||
async def _persist_unpersisted_changes(self) -> None:
|
||||
"""We periodically persist the unpersisted changes, as otherwise they
|
||||
may stack up and slow down shutdown times.
|
||||
"""
|
||||
@@ -710,6 +738,13 @@ class PresenceHandler(BasePresenceHandler):
|
||||
self.unpersisted_users_changes |= {s.user_id for s in new_states}
|
||||
self.unpersisted_users_changes -= set(to_notify.keys())
|
||||
|
||||
# Check if we need to resend any presence states to remote hosts. We
|
||||
# only do this for states that haven't been updated in a while to
|
||||
# ensure that the remote host doesn't time the presence state out.
|
||||
#
|
||||
# Note that since these are states that have *not* been updated,
|
||||
# they won't get sent down the normal presence replication stream,
|
||||
# and so we have to explicitly send them via the federation stream.
|
||||
to_federation_ping = {
|
||||
user_id: state
|
||||
for user_id, state in to_federation_ping.items()
|
||||
@@ -722,15 +757,14 @@ class PresenceHandler(BasePresenceHandler):
|
||||
self.store,
|
||||
self.presence_router,
|
||||
list(to_federation_ping.values()),
|
||||
self.state,
|
||||
)
|
||||
|
||||
for destinations, states in hosts_and_states:
|
||||
self.federation_queue.send_presence_to_destinations(
|
||||
self._federation_queue.send_presence_to_destinations(
|
||||
states, destinations
|
||||
)
|
||||
|
||||
async def _handle_timeouts(self):
|
||||
async def _handle_timeouts(self) -> None:
|
||||
"""Checks the presence of users that have timed out and updates as
|
||||
appropriate.
|
||||
"""
|
||||
@@ -782,7 +816,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
|
||||
return await self._update_states(changes)
|
||||
|
||||
async def bump_presence_active_time(self, user):
|
||||
async def bump_presence_active_time(self, user: UserID) -> None:
|
||||
"""We've seen the user do something that indicates they're interacting
|
||||
with the app.
|
||||
"""
|
||||
@@ -879,17 +913,17 @@ class PresenceHandler(BasePresenceHandler):
|
||||
return []
|
||||
|
||||
async def update_external_syncs_row(
|
||||
self, process_id, user_id, is_syncing, sync_time_msec
|
||||
):
|
||||
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
|
||||
) -> None:
|
||||
"""Update the syncing users for an external process as a delta.
|
||||
|
||||
Args:
|
||||
process_id (str): An identifier for the process the users are
|
||||
process_id: An identifier for the process the users are
syncing against. This allows synapse to process updates
as users start and stop syncing against a given process.
|
||||
user_id (str): The user who has started or stopped syncing
|
||||
is_syncing (bool): Whether or not the user is now syncing
|
||||
sync_time_msec(int): Time in ms when the user was last syncing
|
||||
user_id: The user who has started or stopped syncing
|
||||
is_syncing: Whether or not the user is now syncing
|
||||
sync_time_msec: Time in ms when the user was last syncing
|
||||
"""
|
||||
with (await self.external_sync_linearizer.queue(process_id)):
|
||||
prev_state = await self.current_state_for_user(user_id)
|
||||
@@ -926,7 +960,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
|
||||
self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
|
||||
|
||||
async def update_external_syncs_clear(self, process_id):
|
||||
async def update_external_syncs_clear(self, process_id: str) -> None:
|
||||
"""Marks all users that had been marked as syncing by a given process
|
||||
as offline.
|
||||
|
||||
@@ -947,12 +981,12 @@ class PresenceHandler(BasePresenceHandler):
|
||||
)
|
||||
self.external_process_last_updated_ms.pop(process_id, None)
|
||||
|
||||
async def current_state_for_user(self, user_id):
|
||||
async def current_state_for_user(self, user_id: str) -> UserPresenceState:
|
||||
"""Get the current presence state for a user."""
|
||||
res = await self.current_state_for_users([user_id])
|
||||
return res[user_id]
|
||||
|
||||
async def _persist_and_notify(self, states):
|
||||
async def _persist_and_notify(self, states: List[UserPresenceState]) -> None:
|
||||
"""Persist states in the database, poke the notifier and send to
|
||||
interested remote servers
|
||||
"""
|
||||
@@ -968,23 +1002,12 @@ class PresenceHandler(BasePresenceHandler):
|
||||
users=[UserID.from_string(u) for u in users_to_states],
|
||||
)
|
||||
|
||||
# We only need to tell the local federation sender, if any, that new
|
||||
# presence has happened. Other federation senders will get notified via
|
||||
# the presence replication stream.
|
||||
if not self.federation_sender:
|
||||
return
|
||||
# We only want to poke the local federation sender, if any, as other
|
||||
# workers will receive the presence updates via the presence replication
|
||||
# stream (which is updated by `store.update_presence`).
|
||||
await self.maybe_send_presence_to_interested_destinations(states)
|
||||
|
||||
hosts_and_states = await get_interested_remotes(
|
||||
self.store,
|
||||
self.presence_router,
|
||||
states,
|
||||
self.state,
|
||||
)
|
||||
|
||||
for destinations, states in hosts_and_states:
|
||||
self.federation_sender.send_presence_to_destinations(states, destinations)
|
||||
|
||||
async def incoming_presence(self, origin, content):
|
||||
async def incoming_presence(self, origin: str, content: JsonDict) -> None:
|
||||
"""Called when we receive a `m.presence` EDU from a remote server."""
|
||||
if not self._presence_enabled:
|
||||
return
|
||||
@@ -1034,7 +1057,9 @@ class PresenceHandler(BasePresenceHandler):
|
||||
federation_presence_counter.inc(len(updates))
|
||||
await self._update_states(updates)
|
||||
|
||||
async def set_state(self, target_user, state, ignore_status_msg=False):
|
||||
async def set_state(
|
||||
self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False
|
||||
) -> None:
|
||||
"""Set the presence state of the user."""
|
||||
status_msg = state.get("status_msg", None)
|
||||
presence = state["presence"]
|
||||
@@ -1068,7 +1093,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
|
||||
await self._update_states([prev_state.copy_and_replace(**new_fields)])
|
||||
|
||||
async def is_visible(self, observed_user, observer_user):
|
||||
async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
|
||||
"""Returns whether a user can see another user's presence."""
|
||||
observer_room_ids = await self.store.get_rooms_for_user(
|
||||
observer_user.to_string()
|
||||
@@ -1123,7 +1148,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
)
|
||||
return rows
|
||||
|
||||
def notify_new_event(self):
|
||||
def notify_new_event(self) -> None:
|
||||
"""Called when new events have happened. Handles users and servers
|
||||
joining rooms and require being sent presence.
|
||||
"""
|
||||
@@ -1142,7 +1167,7 @@ class PresenceHandler(BasePresenceHandler):
|
||||
|
||||
run_as_background_process("presence.notify_new_event", _process_presence)
|
||||
|
||||
async def _unsafe_process(self):
|
||||
async def _unsafe_process(self) -> None:
|
||||
# Loop round handling deltas until we're up to date
|
||||
while True:
|
||||
with Measure(self.clock, "presence_delta"):
|
||||
@@ -1158,7 +1183,16 @@ class PresenceHandler(BasePresenceHandler):
|
||||
max_pos, deltas = await self.store.get_current_state_deltas(
|
||||
self._event_pos, room_max_stream_ordering
|
||||
)
|
||||
await self._handle_state_delta(deltas)
|
||||
|
||||
# We may get multiple deltas for different rooms, but we want to
|
||||
# handle them on a room by room basis, so we batch them up by
|
||||
# room.
|
||||
deltas_by_room: Dict[str, List[JsonDict]] = {}
|
||||
for delta in deltas:
|
||||
deltas_by_room.setdefault(delta["room_id"], []).append(delta)
|
||||
|
||||
for room_id, deltas_for_room in deltas_by_room.items():
|
||||
await self._handle_state_delta(room_id, deltas_for_room)
|
||||
|
||||
self._event_pos = max_pos
|
||||
|
||||
@@ -1167,17 +1201,21 @@ class PresenceHandler(BasePresenceHandler):
|
||||
max_pos
|
||||
)
|
||||
|
||||
async def _handle_state_delta(self, deltas):
|
||||
"""Process current state deltas to find new joins that need to be
|
||||
handled.
|
||||
async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None:
|
||||
"""Process current state deltas for the room to find new joins that need
|
||||
to be handled.
|
||||
"""
|
||||
# A map of destination to a set of user state that they should receive
|
||||
presence_destinations = {} # type: Dict[str, Set[UserPresenceState]]
|
||||
|
||||
# Sets of newly joined users. Note that if the local server is
|
||||
# joining a remote room for the first time we'll see both the joining
|
||||
# user and all remote users as newly joined.
|
||||
newly_joined_users = set()
|
||||
|
||||
for delta in deltas:
|
||||
assert room_id == delta["room_id"]
|
||||
|
||||
typ = delta["type"]
|
||||
state_key = delta["state_key"]
|
||||
room_id = delta["room_id"]
|
||||
event_id = delta["event_id"]
|
||||
prev_event_id = delta["prev_event_id"]
|
||||
|
||||
@@ -1206,72 +1244,55 @@ class PresenceHandler(BasePresenceHandler):
|
||||
# Ignore changes to join events.
|
||||
continue
|
||||
|
||||
# Retrieve any user presence state updates that need to be sent as a result,
|
||||
# and the destinations that need to receive it
|
||||
destinations, user_presence_states = await self._on_user_joined_room(
|
||||
room_id, state_key
|
||||
)
|
||||
newly_joined_users.add(state_key)
|
||||
|
||||
# Insert the destinations and respective updates into our destinations dict
|
||||
for destination in destinations:
|
||||
presence_destinations.setdefault(destination, set()).update(
|
||||
user_presence_states
|
||||
)
|
||||
if not newly_joined_users:
|
||||
# If nobody has joined then there's nothing to do.
|
||||
return
|
||||
|
||||
# Send out user presence updates for each destination
|
||||
for destination, user_state_set in presence_destinations.items():
|
||||
self.federation_queue.send_presence_to_destinations(
|
||||
destinations=[destination], states=user_state_set
|
||||
)
|
||||
# We want to send:
|
||||
# 1. presence states of all local users in the room to newly joined
|
||||
# remote servers
|
||||
# 2. presence states of newly joined users to all remote servers in
|
||||
# the room.
|
||||
#
|
||||
# TODO: Only send presence states to remote hosts that don't already
|
||||
# have them (because they already share rooms).
|
||||
|
||||
async def _on_user_joined_room(
|
||||
self, room_id: str, user_id: str
|
||||
) -> Tuple[List[str], List[UserPresenceState]]:
|
||||
"""Called when we detect a user joining the room via the current state
|
||||
delta stream. Returns the destinations that need to be updated and the
|
||||
presence updates to send to them.
|
||||
# Get all the users who were already in the room, by fetching the
|
||||
# current users in the room and removing the newly joined users.
|
||||
users = await self.store.get_users_in_room(room_id)
|
||||
prev_users = set(users) - newly_joined_users
|
||||
|
||||
Args:
|
||||
room_id: The ID of the room that the user has joined.
|
||||
user_id: The ID of the user that has joined the room.
|
||||
# Construct sets for all the local users and remote hosts that were
|
||||
# already in the room
|
||||
prev_local_users = []
|
||||
prev_remote_hosts = set()
|
||||
for user_id in prev_users:
|
||||
if self.is_mine_id(user_id):
|
||||
prev_local_users.append(user_id)
|
||||
else:
|
||||
prev_remote_hosts.add(get_domain_from_id(user_id))
|
||||
|
||||
Returns:
|
||||
A tuple of destinations and presence updates to send to them.
|
||||
"""
|
||||
if self.is_mine_id(user_id):
|
||||
# If this is a local user then we need to send their presence
|
||||
# out to hosts in the room (who don't already have it)
|
||||
# Similarly, construct sets for all the local users and remote hosts
# that were *not* already in the room. Care needs to be taken when
# calculating the remote hosts, as a host may have already been in the
|
||||
# room even if there is a newly joined user from that host.
|
||||
newly_joined_local_users = []
|
||||
newly_joined_remote_hosts = set()
|
||||
for user_id in newly_joined_users:
|
||||
if self.is_mine_id(user_id):
|
||||
newly_joined_local_users.append(user_id)
|
||||
else:
|
||||
host = get_domain_from_id(user_id)
|
||||
if host not in prev_remote_hosts:
|
||||
newly_joined_remote_hosts.add(host)
|
||||
|
||||
# TODO: We should be able to filter the hosts down to those that
|
||||
# haven't previously seen the user
|
||||
|
||||
remote_hosts = await self.state.get_current_hosts_in_room(room_id)
|
||||
|
||||
# Filter out ourselves.
|
||||
filtered_remote_hosts = [
|
||||
host for host in remote_hosts if host != self.server_name
|
||||
]
|
||||
|
||||
state = await self.current_state_for_user(user_id)
|
||||
return filtered_remote_hosts, [state]
|
||||
else:
|
||||
# A remote user has joined the room, so we need to:
|
||||
# 1. Check if this is a new server in the room
|
||||
# 2. If so send any presence they don't already have for
|
||||
# local users in the room.
|
||||
|
||||
# TODO: We should be able to filter the users down to those that
|
||||
# the server hasn't previously seen
|
||||
|
||||
# TODO: Check that this is actually a new server joining the
|
||||
# room.
|
||||
|
||||
remote_host = get_domain_from_id(user_id)
|
||||
|
||||
users = await self.state.get_current_users_in_room(room_id)
|
||||
user_ids = list(filter(self.is_mine_id, users))
|
||||
|
||||
states_d = await self.current_state_for_users(user_ids)
|
||||
# Send presence states of all local users in the room to newly joined
|
||||
# remote servers. (We actually only send states for local users already
|
||||
# in the room, as we'll send states for newly joined local users below.)
|
||||
if prev_local_users and newly_joined_remote_hosts:
|
||||
local_states = await self.current_state_for_users(prev_local_users)
|
||||
|
||||
# Filter out old presence, i.e. offline presence states where
|
||||
# the user hasn't been active for a week. We can change this
|
||||
@@ -1281,16 +1302,30 @@ class PresenceHandler(BasePresenceHandler):
|
||||
now = self.clock.time_msec()
|
||||
states = [
|
||||
state
|
||||
for state in states_d.values()
|
||||
for state in local_states.values()
|
||||
if state.state != PresenceState.OFFLINE
|
||||
or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
|
||||
or state.status_msg is not None
|
||||
]
|
||||
|
||||
return [remote_host], states
|
||||
self._federation_queue.send_presence_to_destinations(
|
||||
destinations=newly_joined_remote_hosts,
|
||||
states=states,
|
||||
)
|
||||
|
||||
# Send presence states of newly joined users to all remote servers in
|
||||
# the room
|
||||
if newly_joined_local_users and (
|
||||
prev_remote_hosts or newly_joined_remote_hosts
|
||||
):
|
||||
local_states = await self.current_state_for_users(newly_joined_local_users)
|
||||
self._federation_queue.send_presence_to_destinations(
|
||||
destinations=prev_remote_hosts | newly_joined_remote_hosts,
|
||||
states=list(local_states.values()),
|
||||
)
|
||||
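A worked example (hypothetical users and servers, not part of the patch) of the set arithmetic `_handle_state_delta` performs above, assuming the local server is `hs1`:

def domain(user_id: str) -> str:
    # Stand-in for get_domain_from_id.
    return user_id.split(":", 1)[1]

local_server = "hs1"
users = {"@a:hs1", "@b:hs2", "@c:hs3", "@d:hs1"}   # current room members
newly_joined_users = {"@c:hs3", "@d:hs1"}          # joins seen in this batch of deltas

prev_users = users - newly_joined_users            # {"@a:hs1", "@b:hs2"}
prev_local_users = [u for u in prev_users if domain(u) == local_server]
prev_remote_hosts = {domain(u) for u in prev_users if domain(u) != local_server}

newly_joined_local_users = [u for u in newly_joined_users if domain(u) == local_server]
newly_joined_remote_hosts = {
    domain(u)
    for u in newly_joined_users
    if domain(u) != local_server and domain(u) not in prev_remote_hosts
}

# Rule 1: existing local presence (@a:hs1) goes to the newly joined host hs3.
assert prev_local_users == ["@a:hs1"] and newly_joined_remote_hosts == {"hs3"}
# Rule 2: the newly joined local user (@d:hs1) goes to every remote host in the room.
assert newly_joined_local_users == ["@d:hs1"]
assert prev_remote_hosts | newly_joined_remote_hosts == {"hs2", "hs3"}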
|
||||
|
||||
def should_notify(old_state, new_state):
|
||||
def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
|
||||
"""Decides if a presence state change should be sent to interested parties."""
|
||||
if old_state == new_state:
|
||||
return False
|
||||
@@ -1326,7 +1361,9 @@ def should_notify(old_state, new_state):
|
||||
return False
|
||||
|
||||
|
||||
def format_user_presence_state(state, now, include_user_id=True):
|
||||
def format_user_presence_state(
|
||||
state: UserPresenceState, now: int, include_user_id: bool = True
|
||||
) -> JsonDict:
|
||||
"""Convert UserPresenceState to a format that can be sent down to clients
|
||||
and to other servers.
|
||||
|
||||
@@ -1360,16 +1397,15 @@ class PresenceEventSource:
|
||||
self.get_presence_router = hs.get_presence_router
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastore()
|
||||
self.state = hs.get_state_handler()
|
||||
|
||||
@log_function
|
||||
async def get_new_events(
|
||||
self,
|
||||
user,
|
||||
from_key,
|
||||
room_ids=None,
|
||||
include_offline=True,
|
||||
explicit_room_id=None,
|
||||
user: UserID,
|
||||
from_key: Optional[int],
|
||||
room_ids: Optional[List[str]] = None,
|
||||
include_offline: bool = True,
|
||||
explicit_room_id: Optional[str] = None,
|
||||
**kwargs,
|
||||
) -> Tuple[List[UserPresenceState], int]:
|
||||
# The process for getting presence events are:
|
||||
@@ -1574,7 +1610,7 @@ class PresenceEventSource:
|
||||
if update.state != PresenceState.OFFLINE
|
||||
]
|
||||
|
||||
def get_current_key(self):
|
||||
def get_current_key(self) -> int:
|
||||
return self.store.get_current_presence_token()
|
||||
|
||||
@cached(num_args=2, cache_context=True)
|
||||
@@ -1634,15 +1670,20 @@ class PresenceEventSource:
|
||||
return users_interested_in
|
||||
|
||||
|
||||
def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
|
||||
def handle_timeouts(
|
||||
user_states: List[UserPresenceState],
|
||||
is_mine_fn: Callable[[str], bool],
|
||||
syncing_user_ids: Set[str],
|
||||
now: int,
|
||||
) -> List[UserPresenceState]:
|
||||
"""Checks the presence of users that have timed out and updates as
|
||||
appropriate.
|
||||
|
||||
Args:
|
||||
user_states(list): List of UserPresenceState's to check.
|
||||
is_mine_fn (fn): Function that returns if a user_id is ours
|
||||
syncing_user_ids (set): Set of user_ids with active syncs.
|
||||
now (int): Current time in ms.
|
||||
user_states: List of UserPresenceState objects to check.
|
||||
is_mine_fn: Function that returns if a user_id is ours
|
||||
syncing_user_ids: Set of user_ids with active syncs.
|
||||
now: Current time in ms.
|
||||
|
||||
Returns:
|
||||
List of UserPresenceState updates
|
||||
@@ -1659,14 +1700,16 @@ def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
|
||||
return list(changes.values())
|
||||
|
||||
|
||||
def handle_timeout(state, is_mine, syncing_user_ids, now):
|
||||
def handle_timeout(
|
||||
state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int
|
||||
) -> Optional[UserPresenceState]:
|
||||
"""Checks the presence of the user to see if any of the timers have elapsed
|
||||
|
||||
Args:
|
||||
state (UserPresenceState)
|
||||
is_mine (bool): Whether the user is ours
|
||||
syncing_user_ids (set): Set of user_ids with active syncs.
|
||||
now (int): Current time in ms.
|
||||
state
|
||||
is_mine: Whether the user is ours
|
||||
syncing_user_ids: Set of user_ids with active syncs.
|
||||
now: Current time in ms.
|
||||
|
||||
Returns:
|
||||
A UserPresenceState update or None if no update.
|
||||
@@ -1718,23 +1761,29 @@ def handle_timeout(state, is_mine, syncing_user_ids, now):
|
||||
return state if changed else None
|
||||
|
||||
|
||||
def handle_update(prev_state, new_state, is_mine, wheel_timer, now):
|
||||
def handle_update(
|
||||
prev_state: UserPresenceState,
|
||||
new_state: UserPresenceState,
|
||||
is_mine: bool,
|
||||
wheel_timer: WheelTimer,
|
||||
now: int,
|
||||
) -> Tuple[UserPresenceState, bool, bool]:
|
||||
"""Given a presence update:
|
||||
1. Add any appropriate timers.
|
||||
2. Check if we should notify anyone.
|
||||
|
||||
Args:
|
||||
prev_state (UserPresenceState)
|
||||
new_state (UserPresenceState)
|
||||
is_mine (bool): Whether the user is ours
|
||||
wheel_timer (WheelTimer)
|
||||
now (int): Time now in ms
|
||||
prev_state
|
||||
new_state
|
||||
is_mine: Whether the user is ours
|
||||
wheel_timer
|
||||
now: Time now in ms
|
||||
|
||||
Returns:
|
||||
3-tuple: `(new_state, persist_and_notify, federation_ping)` where:
|
||||
- new_state: is the state to actually persist
|
||||
- persist_and_notify (bool): whether to persist and notify people
|
||||
- federation_ping (bool): whether we should send a ping over federation
|
||||
- persist_and_notify: whether to persist and notify people
|
||||
- federation_ping: whether we should send a ping over federation
|
||||
"""
|
||||
user_id = new_state.user_id
|
||||
|
||||
@@ -1829,7 +1878,6 @@ async def get_interested_remotes(
|
||||
store: DataStore,
|
||||
presence_router: PresenceRouter,
|
||||
states: List[UserPresenceState],
|
||||
state_handler: StateHandler,
|
||||
) -> List[Tuple[Collection[str], List[UserPresenceState]]]:
|
||||
"""Given a list of presence states figure out which remote servers
|
||||
should be sent which.
|
||||
@@ -1840,7 +1888,6 @@ async def get_interested_remotes(
|
||||
store: The homeserver's data store.
|
||||
presence_router: A module for augmenting the destinations for presence updates.
|
||||
states: A list of incoming user presence updates.
|
||||
state_handler:
|
||||
|
||||
Returns:
|
||||
A list of 2-tuples of destinations and states, where for
|
||||
@@ -1857,7 +1904,8 @@ async def get_interested_remotes(
|
||||
)
|
||||
|
||||
for room_id, states in room_ids_to_states.items():
|
||||
hosts = await state_handler.get_current_hosts_in_room(room_id)
|
||||
user_ids = await store.get_users_in_room(room_id)
|
||||
hosts = {get_domain_from_id(user_id) for user_id in user_ids}
|
||||
hosts_and_states.append((hosts, states))
|
||||
|
||||
for user_id, states in users_to_states.items():
|
||||
@@ -1875,6 +1923,10 @@ class PresenceFederationQueue:
|
||||
Only the last N minutes will be queued, so if a federation sender instance
is down for longer than that, some updates will be dropped. This is OK as presence
is ephemeral, and so it will self-correct eventually.
|
||||
|
||||
On workers the class tracks the last received position of the stream from
|
||||
replication, and handles querying for missed updates over HTTP replication,
|
||||
c.f. `get_current_token` and `get_replication_rows`.
|
||||
"""
|
||||
|
||||
# How long to keep entries in the queue for. Workers that are down for
|
||||
@@ -1895,10 +1947,15 @@ class PresenceFederationQueue:
|
||||
# another process may be handling federation sending.
|
||||
self._queue_presence_updates = True
|
||||
|
||||
# The federation sender if this instance is a federation sender.
|
||||
self._federation = hs.get_federation_sender()
|
||||
# Whether this instance is a presence writer.
|
||||
self._presence_writer = self._instance_name in hs.config.worker.writers.presence
|
||||
|
||||
# The FederationSender instance, if this process sends federation traffic directly.
|
||||
self._federation = None
|
||||
|
||||
if hs.should_send_federation():
|
||||
self._federation = hs.get_federation_sender()
|
||||
|
||||
if self._federation:
|
||||
# We don't bother queuing up presence states if only this instance
|
||||
# is sending federation.
|
||||
if hs.config.worker.federation_shard_config.instances == [
|
||||
@@ -1938,10 +1995,18 @@ class PresenceFederationQueue:
|
||||
|
||||
Will forward to the local federation sender (if there is one) and queue
to send over replication (if there are other federation sender instances).
|
||||
|
||||
Must only be called on the presence writer process.
|
||||
"""
|
||||
|
||||
# This should only be called on a presence writer.
|
||||
assert self._presence_writer
|
||||
|
||||
if self._federation:
|
||||
self._federation.send_presence_to_destinations(states, destinations)
|
||||
self._federation.send_presence_to_destinations(
|
||||
states=states,
|
||||
destinations=destinations,
|
||||
)
|
||||
|
||||
if not self._queue_presence_updates:
|
||||
return
|
||||
@@ -1956,6 +2021,10 @@ class PresenceFederationQueue:
|
||||
self._notifier.notify_replication()
|
||||
|
||||
def get_current_token(self, instance_name: str) -> int:
|
||||
"""Get the current position of the stream.
|
||||
|
||||
On workers this returns the last stream ID received from replication.
|
||||
"""
|
||||
if instance_name == self._instance_name:
|
||||
return self._next_id - 1
|
||||
else:
|
||||
@@ -1972,9 +2041,12 @@ class PresenceFederationQueue:
|
||||
|
||||
We return rows in the form of `(destination, user_id)` to keep the size
|
||||
of each row bounded (rather than returning the sets in a row).
|
||||
|
||||
On workers this will query the presence writer process via HTTP replication.
|
||||
"""
|
||||
if instance_name != self._instance_name:
|
||||
# If not local we query over replication.
|
||||
# If not local we query over http replication from the presence
|
||||
# writer
|
||||
result = await self._repl_client(
|
||||
instance_name=instance_name,
|
||||
stream_name=PresenceFederationStream.NAME,
|
||||
@@ -1983,18 +2055,40 @@ class PresenceFederationQueue:
|
||||
)
|
||||
return result["updates"], result["upto_token"], result["limited"]
|
||||
|
||||
# If the from_token is the current token then there's nothing to return
|
||||
# and we can trivially no-op.
|
||||
if from_token == self._next_id - 1:
|
||||
return [], upto_token, False
|
||||
|
||||
# We can find the correct position in the queue by noting that there is
|
||||
# exactly one entry per stream ID, and that the last entry has an ID of
|
||||
# `self._next_id - 1`, so we can count backwards from the end.
|
||||
#
|
||||
# Since we are returning all states in the range `from_token < stream_id
|
||||
# <= upto_token` we look for the index with a `stream_id` of `from_token
|
||||
# + 1`.
|
||||
#
|
||||
# Since the start of the queue is periodically truncated we need to
|
||||
# handle the case where `from_token` stream ID has already been dropped.
|
||||
start_idx = max(from_token - self._next_id, -len(self._queue))
|
||||
start_idx = max(from_token + 1 - self._next_id, -len(self._queue))
|
||||
|
||||
to_send = [] # type: List[Tuple[int, Tuple[str, str]]]
|
||||
limited = False
|
||||
new_id = upto_token
|
||||
for _, stream_id, destinations, user_ids in self._queue[start_idx:]:
|
||||
if stream_id <= from_token:
|
||||
# Paranoia check that we are actually only sending states that
# have a stream_id strictly greater than from_token. We should
|
||||
# never hit this.
|
||||
logger.warning(
|
||||
"Tried returning presence federation stream ID: %d less than from_token: %d (next_id: %d, len: %d)",
|
||||
stream_id,
|
||||
from_token,
|
||||
self._next_id,
|
||||
len(self._queue),
|
||||
)
|
||||
continue
|
||||
|
||||
if stream_id > upto_token:
|
||||
break
|
||||
|
||||
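To make the index arithmetic above concrete, a small self-contained sketch (toy queue, not the real class) of how `start_idx` picks out the first entry with `stream_id > from_token`:

# Each queue entry is (timestamp, stream_id, destinations, user_ids); the
# queue currently holds stream IDs 5..9, so the next ID to assign is 10.
queue = [(0, sid, {"remote.example"}, {"@user:test"}) for sid in range(5, 10)]
next_id = 10

def start_index(from_token: int) -> int:
    # One entry per stream ID, and the last entry has ID next_id - 1, so we
    # count back from the end; max() clamps to the start of the queue when
    # from_token has already been truncated away.
    return max(from_token + 1 - next_id, -len(queue))

assert queue[start_index(6)][1] == 7   # first entry strictly after token 6
assert queue[start_index(0)][1] == 5   # token already truncated: start at the front
# (from_token == next_id - 1 never reaches this arithmetic; the early return
# above handles that case.)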
@@ -2018,7 +2112,7 @@ class PresenceFederationQueue:
|
||||
if stream_name != PresenceFederationStream.NAME:
|
||||
return
|
||||
|
||||
# We keep track of the current tokens
|
||||
# We keep track of the current tokens (so that we can catch up with anything we missed after a disconnect)
|
||||
self._current_tokens[instance_name] = token
|
||||
|
||||
# If we're a federation sender we pull out the presence states to send
|
||||
@@ -2032,4 +2126,7 @@ class PresenceFederationQueue:
|
||||
|
||||
for host, user_ids in hosts_to_users.items():
|
||||
states = await self._presence_handler.current_state_for_users(user_ids)
|
||||
self._federation.send_presence_to_destinations(states.values(), [host])
|
||||
self._federation.send_presence_to_destinations(
|
||||
states=states.values(),
|
||||
destinations=[host],
|
||||
)
|
||||
|
||||
@@ -36,7 +36,9 @@ class ReceiptsHandler(BaseHandler):
|
||||
# same instance. Other federation sender instances will get notified by
|
||||
# `synapse.app.generic_worker.FederationSenderHandler` when it sees it
|
||||
# in the receipts stream.
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
self.federation_sender = None
|
||||
if hs.should_send_federation():
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
# If we can handle the receipt EDUs we do so, otherwise we route them
|
||||
# to the appropriate worker.
|
||||
|
||||
@@ -722,9 +722,7 @@ class RegistrationHandler(BaseHandler):
|
||||
)
|
||||
if is_guest:
|
||||
assert valid_until_ms is None
|
||||
access_token = self.macaroon_gen.generate_access_token(
|
||||
user_id, ["guest = true"]
|
||||
)
|
||||
access_token = self.macaroon_gen.generate_guest_access_token(user_id)
|
||||
else:
|
||||
access_token = await self._auth_handler.get_access_token_for_user_id(
|
||||
user_id,
|
||||
|
||||
@@ -32,7 +32,14 @@ from synapse.api.constants import (
|
||||
RoomCreationPreset,
|
||||
RoomEncryptionAlgorithms,
|
||||
)
|
||||
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
Codes,
|
||||
LimitExceededError,
|
||||
NotFoundError,
|
||||
StoreError,
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.api.filtering import Filter
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
|
||||
from synapse.events import EventBase
|
||||
@@ -126,10 +133,6 @@ class RoomCreationHandler(BaseHandler):
|
||||
|
||||
self.third_party_event_rules = hs.get_third_party_event_rules()
|
||||
|
||||
self._invite_burst_count = (
|
||||
hs.config.ratelimiting.rc_invites_per_room.burst_count
|
||||
)
|
||||
|
||||
async def upgrade_room(
|
||||
self, requester: Requester, old_room_id: str, new_version: RoomVersion
|
||||
) -> str:
|
||||
@@ -676,8 +679,18 @@ class RoomCreationHandler(BaseHandler):
|
||||
invite_3pid_list = []
|
||||
invite_list = []
|
||||
|
||||
if len(invite_list) + len(invite_3pid_list) > self._invite_burst_count:
|
||||
raise SynapseError(400, "Cannot invite so many users at once")
|
||||
if invite_list or invite_3pid_list:
|
||||
try:
|
||||
# If there are invites in the request, see if the ratelimiting settings
|
||||
# allow that number of invites to be sent from the current user.
|
||||
await self.room_member_handler.ratelimit_multiple_invites(
|
||||
requester,
|
||||
room_id=None,
|
||||
n_invites=len(invite_list) + len(invite_3pid_list),
|
||||
update=False,
|
||||
)
|
||||
except LimitExceededError:
|
||||
raise SynapseError(400, "Cannot invite so many users at once")
|
||||
|
||||
await self.event_creation_handler.assert_accepted_privacy_policy(requester)
|
||||
|
||||
@@ -1327,7 +1340,7 @@ class RoomShutdownHandler:
|
||||
new_room_id = None
|
||||
logger.info("Shutting down room %r", room_id)
|
||||
|
||||
users = await self.state.get_current_users_in_room(room_id)
|
||||
users = await self.store.get_users_in_room(room_id)
|
||||
kicked_users = []
|
||||
failed_to_kick_users = []
|
||||
for user_id in users:
|
||||
|
||||
@@ -19,7 +19,7 @@ from http import HTTPStatus
|
||||
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
|
||||
|
||||
from synapse import types
|
||||
from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership
|
||||
from synapse.api.constants import AccountDataTypes, EventTypes, Membership
|
||||
from synapse.api.errors import (
|
||||
AuthError,
|
||||
Codes,
|
||||
@@ -28,7 +28,6 @@ from synapse.api.errors import (
|
||||
SynapseError,
|
||||
)
|
||||
from synapse.api.ratelimiting import Ratelimiter
|
||||
from synapse.api.room_versions import RoomVersion
|
||||
from synapse.events import EventBase
|
||||
from synapse.events.snapshot import EventContext
|
||||
from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID
|
||||
@@ -64,6 +63,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
self.profile_handler = hs.get_profile_handler()
|
||||
self.event_creation_handler = hs.get_event_creation_handler()
|
||||
self.account_data_handler = hs.get_account_data_handler()
|
||||
self.event_auth_handler = hs.get_event_auth_handler()
|
||||
|
||||
self.member_linearizer = Linearizer(name="member")
|
||||
|
||||
@@ -163,6 +163,31 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
async def forget(self, user: UserID, room_id: str) -> None:
|
||||
raise NotImplementedError()
|
||||
|
||||
async def ratelimit_multiple_invites(
|
||||
self,
|
||||
requester: Optional[Requester],
|
||||
room_id: Optional[str],
|
||||
n_invites: int,
|
||||
update: bool = True,
|
||||
):
|
||||
"""Ratelimit more than one invite sent by the given requester in the given room.
|
||||
|
||||
Args:
|
||||
requester: The requester sending the invites.
|
||||
room_id: The room the invites are being sent in.
|
||||
n_invites: The amount of invites to ratelimit for.
|
||||
update: Whether to update the ratelimiter's cache.
|
||||
|
||||
Raises:
|
||||
LimitExceededError: The requester can't send that many invites in the room.
|
||||
"""
|
||||
await self._invites_per_room_limiter.ratelimit(
|
||||
requester,
|
||||
room_id,
|
||||
update=update,
|
||||
n_actions=n_invites,
|
||||
)
|
||||
|
||||
async def ratelimit_invite(
|
||||
self,
|
||||
requester: Optional[Requester],
|
||||
@@ -178,62 +203,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
|
||||
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
|
||||
|
||||
async def _can_join_without_invite(
|
||||
self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
|
||||
) -> bool:
|
||||
"""
|
||||
Check whether a user can join a room without an invite.
|
||||
|
||||
When joining a room with restricted join rules (as defined in MSC3083),
|
||||
the membership of spaces must be checked during join.
|
||||
|
||||
Args:
|
||||
state_ids: The state of the room as it currently is.
|
||||
room_version: The room version of the room being joined.
|
||||
user_id: The user joining the room.
|
||||
|
||||
Returns:
|
||||
True if the user can join the room, false otherwise.
|
||||
"""
|
||||
# This only applies to room versions which support the new join rule.
|
||||
if not room_version.msc3083_join_rules:
|
||||
return True
|
||||
|
||||
# If there's no join rule, then it defaults to public (so this doesn't apply).
|
||||
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
|
||||
if not join_rules_event_id:
|
||||
return True
|
||||
|
||||
# If the join rule is not restricted, this doesn't apply.
|
||||
join_rules_event = await self.store.get_event(join_rules_event_id)
|
||||
if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
|
||||
return True
|
||||
|
||||
# If allowed is of the wrong form, then only allow invited users.
|
||||
allowed_spaces = join_rules_event.content.get("allow", [])
|
||||
if not isinstance(allowed_spaces, list):
|
||||
return False
|
||||
|
||||
# Get the list of joined rooms and see if there's an overlap.
|
||||
joined_rooms = await self.store.get_rooms_for_user(user_id)
|
||||
|
||||
# Pull out the other room IDs, invalid data gets filtered.
|
||||
for space in allowed_spaces:
|
||||
if not isinstance(space, dict):
|
||||
continue
|
||||
|
||||
space_id = space.get("space")
|
||||
if not isinstance(space_id, str):
|
||||
continue
|
||||
|
||||
# The user was joined to one of the spaces specified, they can join
|
||||
# this room!
|
||||
if space_id in joined_rooms:
|
||||
return True
|
||||
|
||||
# The user was not in any of the required spaces.
|
||||
return False
|
||||
|
||||
async def _local_membership_update(
|
||||
self,
|
||||
requester: Requester,
|
||||
@@ -302,7 +271,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
if (
|
||||
newly_joined
|
||||
and not user_is_invited
|
||||
and not await self._can_join_without_invite(
|
||||
and not await self.event_auth_handler.can_join_without_invite(
|
||||
prev_state_ids, event.room_version, user_id
|
||||
)
|
||||
):
|
||||
@@ -1100,7 +1069,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
|
||||
|
||||
class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
self.distributor = hs.get_distributor()
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
import re
|
||||
from collections import deque
|
||||
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple, cast
|
||||
|
||||
@@ -226,6 +227,23 @@ class SpaceSummaryHandler:
|
||||
suggested_only: bool,
|
||||
max_children: Optional[int],
|
||||
) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
|
||||
"""
|
||||
Generate a room entry and a list of event entries for a given room.
|
||||
|
||||
Args:
|
||||
requester: The requesting user, or None if this is over federation.
|
||||
room_id: The room ID to summarize.
|
||||
suggested_only: True if only suggested children should be returned.
|
||||
Otherwise, all children are returned.
|
||||
max_children: The maximum number of children to return for this node.
|
||||
|
||||
Returns:
|
||||
A tuple of:
|
||||
An iterable containing a single entry for the room itself.

An iterable of the sorted child events. This may be limited
|
||||
to a maximum size or may include all children.
|
||||
"""
|
||||
if not await self._is_room_accessible(room_id, requester):
|
||||
return (), ()
|
||||
|
||||
@@ -288,6 +306,7 @@ class SpaceSummaryHandler:
|
||||
ev.data
|
||||
for ev in res.events
|
||||
if ev.event_type == EventTypes.MSC1772_SPACE_CHILD
|
||||
or ev.event_type == EventTypes.SpaceChild
|
||||
)
|
||||
|
||||
async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool:
|
||||
@@ -331,7 +350,9 @@ class SpaceSummaryHandler:
|
||||
)
|
||||
|
||||
# TODO: update once MSC1772 lands
|
||||
room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE)
|
||||
room_type = create_event.content.get(EventContentFields.ROOM_TYPE)
|
||||
if not room_type:
|
||||
room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE)
|
||||
|
||||
entry = {
|
||||
"room_id": stats["room_id"],
|
||||
@@ -344,6 +365,7 @@ class SpaceSummaryHandler:
|
||||
stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
|
||||
),
|
||||
"guest_can_join": stats["guest_access"] == "can_join",
|
||||
"creation_ts": create_event.origin_server_ts,
|
||||
"room_type": room_type,
|
||||
}
|
||||
|
||||
@@ -353,6 +375,18 @@ class SpaceSummaryHandler:
|
||||
return room_entry
|
||||
|
||||
async def _get_child_events(self, room_id: str) -> Iterable[EventBase]:
|
||||
"""
|
||||
Get the child events for a given room.
|
||||
|
||||
The returned results are sorted for stability.
|
||||
|
||||
Args:
|
||||
room_id: The room id to get the children of.
|
||||
|
||||
Returns:
|
||||
An iterable of sorted child events.
|
||||
"""
|
||||
|
||||
# look for child rooms/spaces.
|
||||
current_state_ids = await self._store.get_current_state_ids(room_id)
|
||||
|
||||
@@ -360,13 +394,15 @@ class SpaceSummaryHandler:
|
||||
[
|
||||
event_id
|
||||
for key, event_id in current_state_ids.items()
|
||||
# TODO: update once MSC1772 lands
|
||||
# TODO: update once MSC1772 has been FCP for a period of time.
|
||||
if key[0] == EventTypes.MSC1772_SPACE_CHILD
|
||||
or key[0] == EventTypes.SpaceChild
|
||||
]
|
||||
)
|
||||
|
||||
# filter out any events without a "via" (which implies it has been redacted)
|
||||
return (e for e in events if _has_valid_via(e))
|
||||
# filter out any events without a "via" (which implies it has been redacted),
|
||||
# and order to ensure we return stable results.
|
||||
return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key)
|
||||
|
||||
|
||||
@attr.s(frozen=True, slots=True)
|
||||
@@ -392,3 +428,39 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool:
|
||||
return True
|
||||
logger.debug("Ignorning not-suggested child %s", edge_event.state_key)
|
||||
return False
|
||||
|
||||
|
||||
# Order may only contain characters in the range of \x20 (space) to \x7F (~).
|
||||
_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]")
|
||||
|
||||
|
||||
def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
|
||||
"""
|
||||
Generate a value for comparing two child events for ordering.
|
||||
|
||||
The rules for ordering are supposed to be:
|
||||
|
||||
1. The 'order' key, if it is valid.
|
||||
2. The 'origin_server_ts' of the 'm.room.create' event.
|
||||
3. The 'room_id'.
|
||||
|
||||
But we skip step 2 since we may not have any state from the room.
|
||||
|
||||
Args:
|
||||
child: The event for generating a comparison key.
|
||||
|
||||
Returns:
|
||||
The comparison key as a tuple of:
|
||||
False if the ordering is valid.
|
||||
The ordering field.
|
||||
The room ID.
|
||||
"""
|
||||
order = child.content.get("order")
|
||||
# If order is not a string or doesn't meet the requirements, ignore it.
|
||||
if not isinstance(order, str):
|
||||
order = None
|
||||
elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
|
||||
order = None
|
||||
|
||||
# Items without an order come last.
|
||||
return (order is None, order, child.room_id)
|
||||
|
||||
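For illustration, here is how the comparison key above orders a few hypothetical child events (SimpleNamespace stands in for EventBase; the key is restated compactly so the snippet runs on its own):

import re
from types import SimpleNamespace

_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]")

def comparison_key(child):
    order = child.content.get("order")
    if not isinstance(order, str) or len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
        order = None
    # Children without a usable order sort last, then by order, then room ID.
    return (order is None, order, child.room_id)

children = [
    SimpleNamespace(content={}, room_id="!b:example.org"),
    SimpleNamespace(content={"order": "01"}, room_id="!c:example.org"),
    SimpleNamespace(content={"order": 5}, room_id="!a:example.org"),  # not a str: ignored
]
children.sort(key=comparison_key)
assert [c.room_id for c in children] == ["!c:example.org", "!a:example.org", "!b:example.org"]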
@@ -18,6 +18,7 @@ from typing import (
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Collection,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
@@ -40,7 +41,7 @@ from synapse.handlers.ui_auth import UIAuthSessionDataConstants
|
||||
from synapse.http import get_request_user_agent
|
||||
from synapse.http.server import respond_with_html, respond_with_redirect
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.types import Collection, JsonDict, UserID, contains_invalid_mxid_characters
|
||||
from synapse.types import JsonDict, UserID, contains_invalid_mxid_characters
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
|
||||
@@ -14,7 +14,17 @@
|
||||
# limitations under the License.
|
||||
import itertools
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Collection,
|
||||
Dict,
|
||||
FrozenSet,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
import attr
|
||||
from prometheus_client import Counter
|
||||
@@ -28,7 +38,6 @@ from synapse.push.clientformat import format_push_rules_for_user
|
||||
from synapse.storage.roommember import MemberSummary
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import (
|
||||
Collection,
|
||||
JsonDict,
|
||||
MutableStateMap,
|
||||
Requester,
|
||||
@@ -1181,7 +1190,7 @@ class SyncHandler:
|
||||
|
||||
# Step 1b, check for newly joined rooms
|
||||
for room_id in newly_joined_rooms:
|
||||
joined_users = await self.state.get_current_users_in_room(room_id)
|
||||
joined_users = await self.store.get_users_in_room(room_id)
|
||||
newly_joined_or_invited_users.update(joined_users)
|
||||
|
||||
# TODO: Check that these users are actually new, i.e. either they
|
||||
@@ -1197,7 +1206,7 @@ class SyncHandler:
|
||||
|
||||
# Now find users that we no longer track
|
||||
for room_id in newly_left_rooms:
|
||||
left_users = await self.state.get_current_users_in_room(room_id)
|
||||
left_users = await self.store.get_users_in_room(room_id)
|
||||
newly_left_users.update(left_users)
|
||||
|
||||
# Remove any users that we still share a room with.
|
||||
@@ -1352,7 +1361,7 @@ class SyncHandler:
|
||||
|
||||
extra_users_ids = set(newly_joined_or_invited_users)
|
||||
for room_id in newly_joined_rooms:
|
||||
users = await self.state.get_current_users_in_room(room_id)
|
||||
users = await self.store.get_users_in_room(room_id)
|
||||
extra_users_ids.update(users)
|
||||
extra_users_ids.discard(user.to_string())
|
||||
|
||||
|
||||
@@ -57,7 +57,9 @@ class FollowerTypingHandler:
|
||||
self.clock = hs.get_clock()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
|
||||
self.federation = hs.get_federation_sender()
|
||||
self.federation = None
|
||||
if hs.should_send_federation():
|
||||
self.federation = hs.get_federation_sender()
|
||||
|
||||
if hs.config.worker.writers.typing != hs.get_instance_name():
|
||||
hs.get_federation_registry().register_instance_for_edu(
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from twisted.web.client import PartialDownloadError
|
||||
|
||||
@@ -22,13 +22,16 @@ from synapse.api.errors import Codes, LoginError, SynapseError
|
||||
from synapse.config.emailconfig import ThreepidBehaviour
|
||||
from synapse.util import json_decoder
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class UserInteractiveAuthChecker:
|
||||
"""Abstract base class for an interactive auth checker"""
|
||||
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
pass
|
||||
|
||||
def is_enabled(self) -> bool:
|
||||
@@ -57,10 +60,10 @@ class UserInteractiveAuthChecker:
|
||||
class DummyAuthChecker(UserInteractiveAuthChecker):
|
||||
AUTH_TYPE = LoginType.DUMMY
|
||||
|
||||
def is_enabled(self):
|
||||
def is_enabled(self) -> bool:
|
||||
return True
|
||||
|
||||
async def check_auth(self, authdict, clientip):
|
||||
async def check_auth(self, authdict: dict, clientip: str) -> Any:
|
||||
return True
|
||||
|
||||
|
||||
@@ -70,24 +73,24 @@ class TermsAuthChecker(UserInteractiveAuthChecker):
|
||||
def is_enabled(self):
|
||||
return True
|
||||
|
||||
async def check_auth(self, authdict, clientip):
|
||||
async def check_auth(self, authdict: dict, clientip: str) -> Any:
|
||||
return True
|
||||
|
||||
|
||||
class RecaptchaAuthChecker(UserInteractiveAuthChecker):
|
||||
AUTH_TYPE = LoginType.RECAPTCHA
|
||||
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
self._enabled = bool(hs.config.recaptcha_private_key)
|
||||
self._http_client = hs.get_proxied_http_client()
|
||||
self._url = hs.config.recaptcha_siteverify_api
|
||||
self._secret = hs.config.recaptcha_private_key
|
||||
|
||||
def is_enabled(self):
|
||||
def is_enabled(self) -> bool:
|
||||
return self._enabled
|
||||
|
||||
async def check_auth(self, authdict, clientip):
|
||||
async def check_auth(self, authdict: dict, clientip: str) -> Any:
|
||||
try:
|
||||
user_response = authdict["response"]
|
||||
except KeyError:
|
||||
@@ -132,11 +135,11 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
|
||||
|
||||
|
||||
class _BaseThreepidAuthChecker:
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self.hs = hs
|
||||
self.store = hs.get_datastore()
|
||||
|
||||
async def _check_threepid(self, medium, authdict):
|
||||
async def _check_threepid(self, medium: str, authdict: dict) -> dict:
|
||||
if "threepid_creds" not in authdict:
|
||||
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
|
||||
|
||||
@@ -206,31 +209,31 @@ class _BaseThreepidAuthChecker:
|
||||
class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
|
||||
AUTH_TYPE = LoginType.EMAIL_IDENTITY
|
||||
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
UserInteractiveAuthChecker.__init__(self, hs)
|
||||
_BaseThreepidAuthChecker.__init__(self, hs)
|
||||
|
||||
def is_enabled(self):
|
||||
def is_enabled(self) -> bool:
|
||||
return self.hs.config.threepid_behaviour_email in (
|
||||
ThreepidBehaviour.REMOTE,
|
||||
ThreepidBehaviour.LOCAL,
|
||||
)
|
||||
|
||||
async def check_auth(self, authdict, clientip):
|
||||
async def check_auth(self, authdict: dict, clientip: str) -> Any:
|
||||
return await self._check_threepid("email", authdict)
|
||||
|
||||
|
||||
class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
|
||||
AUTH_TYPE = LoginType.MSISDN
|
||||
|
||||
def __init__(self, hs):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
UserInteractiveAuthChecker.__init__(self, hs)
|
||||
_BaseThreepidAuthChecker.__init__(self, hs)
|
||||
|
||||
def is_enabled(self):
|
||||
def is_enabled(self) -> bool:
|
||||
return bool(self.hs.config.account_threepid_delegate_msisdn)
|
||||
|
||||
async def check_auth(self, authdict, clientip):
|
||||
async def check_auth(self, authdict: dict, clientip: str) -> Any:
|
||||
return await self._check_threepid("msisdn", authdict)
|
||||
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
super().__init__(hs)
|
||||
|
||||
self.store = hs.get_datastore()
|
||||
self.state = hs.get_state_handler()
|
||||
self.server_name = hs.hostname
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
@@ -302,10 +301,12 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
# ignore the change
|
||||
return
|
||||
|
||||
users_with_profile = await self.state.get_current_users_in_room(room_id)
|
||||
other_users_in_room_with_profiles = (
|
||||
await self.store.get_users_in_room_with_profiles(room_id)
|
||||
)
|
||||
|
||||
# Remove every user from the sharing tables for that room.
|
||||
for user_id in users_with_profile.keys():
|
||||
for user_id in other_users_in_room_with_profiles.keys():
|
||||
await self.store.remove_user_who_share_room(user_id, room_id)
|
||||
|
||||
# Then, re-add them to the tables.
|
||||
@@ -314,7 +315,7 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
# which when run over an entire room, will result in the same values
|
||||
# being added multiple times. The batching upserts shouldn't make this
|
||||
# too bad, though.
|
||||
for user_id, profile in users_with_profile.items():
|
||||
for user_id, profile in other_users_in_room_with_profiles.items():
|
||||
await self._handle_new_user(room_id, user_id, profile)
|
||||
|
||||
async def _handle_new_user(
|
||||
@@ -336,7 +337,7 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
room_id
|
||||
)
|
||||
# Now we update users who share rooms with users.
|
||||
users_with_profile = await self.state.get_current_users_in_room(room_id)
|
||||
other_users_in_room = await self.store.get_users_in_room(room_id)
|
||||
|
||||
if is_public:
|
||||
await self.store.add_users_in_public_rooms(room_id, (user_id,))
|
||||
@@ -352,14 +353,14 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||
|
||||
# We don't care about appservice users.
|
||||
if not is_appservice:
|
||||
for other_user_id in users_with_profile:
|
||||
for other_user_id in other_users_in_room:
|
||||
if user_id == other_user_id:
|
||||
continue
|
||||
|
||||
to_insert.add((user_id, other_user_id))
|
||||
|
||||
# Next we need to update for every local user in the room
|
||||
for other_user_id in users_with_profile:
|
||||
for other_user_id in other_users_in_room:
|
||||
if user_id == other_user_id:
|
||||
continue
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@ import treq
|
||||
from canonicaljson import encode_canonical_json
|
||||
from netaddr import AddrFormatError, IPAddress, IPSet
|
||||
from prometheus_client import Counter
|
||||
from typing_extensions import Protocol
|
||||
from zope.interface import implementer, provider
|
||||
|
||||
from OpenSSL import SSL
|
||||
@@ -754,6 +755,16 @@ def _timeout_to_request_timed_out_error(f: Failure):
|
||||
return f
|
||||
|
||||
|
||||
class ByteWriteable(Protocol):
|
||||
"""The type of object which must be passed into read_body_with_max_size.
|
||||
|
||||
Typically this is a file object.
|
||||
"""
|
||||
|
||||
def write(self, data: bytes) -> int:
|
||||
pass
|
||||
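As an aside, a minimal sketch of what satisfying this protocol means in practice: any object with a compatible `write` method type-checks, e.g. an `io.BytesIO` or an open binary file (illustrative only, assuming Python 3.8+ for `typing.Protocol`):

import io
from typing import Protocol  # typing_extensions.Protocol on older Pythons

class Writeable(Protocol):
    def write(self, data: bytes) -> int:
        ...

def consume(stream: Writeable) -> None:
    # read_body_with_max_size behaves similarly: it only ever calls
    # stream.write() with chunks of the response body.
    stream.write(b"chunk of response body")

buf = io.BytesIO()
consume(buf)  # BytesIO matches the protocol structurally, no subclassing needed
assert buf.getvalue().startswith(b"chunk")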
|
||||
|
||||
class BodyExceededMaxSize(Exception):
|
||||
"""The maximum allowed size of the HTTP body was exceeded."""
|
||||
|
||||
@@ -790,7 +801,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
|
||||
transport = None # type: Optional[ITCPTransport]
|
||||
|
||||
def __init__(
|
||||
self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int]
|
||||
self, stream: ByteWriteable, deferred: defer.Deferred, max_size: Optional[int]
|
||||
):
|
||||
self.stream = stream
|
||||
self.deferred = deferred
|
||||
@@ -830,7 +841,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
|
||||
|
||||
|
||||
def read_body_with_max_size(
|
||||
response: IResponse, stream: BinaryIO, max_size: Optional[int]
|
||||
response: IResponse, stream: ByteWriteable, max_size: Optional[int]
|
||||
) -> defer.Deferred:
|
||||
"""
|
||||
Read an HTTP response body to a file-object, optionally enforcing a maximum file size.
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -13,11 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import cgi
+import codecs
 import logging
 import random
 import sys
+import typing
 import urllib.parse
-from io import BytesIO
+from io import BytesIO, StringIO
 from typing import Callable, Dict, List, Optional, Tuple, Union
 
 import attr
@@ -72,6 +73,9 @@ incoming_responses_counter = Counter(
     "synapse_http_matrixfederationclient_responses", "", ["method", "code"]
 )
 
+# a federation response can be rather large (eg a big state_ids is 50M or so), so we
+# need a generous limit here.
+MAX_RESPONSE_SIZE = 100 * 1024 * 1024
 
 MAX_LONG_RETRIES = 10
 MAX_SHORT_RETRIES = 3
@@ -167,12 +171,27 @@ async def _handle_json_response(
     try:
         check_content_type_is_json(response.headers)
 
-        # Use the custom JSON decoder (partially re-implements treq.json_content).
-        d = treq.text_content(response, encoding="utf-8")
-        d.addCallback(json_decoder.decode)
+        buf = StringIO()
+        d = read_body_with_max_size(response, BinaryIOWrapper(buf), MAX_RESPONSE_SIZE)
         d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)
 
+        def parse(_len: int):
+            return json_decoder.decode(buf.getvalue())
+
+        d.addCallback(parse)
+
         body = await make_deferred_yieldable(d)
+    except BodyExceededMaxSize as e:
+        # The response was too big.
+        logger.warning(
+            "{%s} [%s] JSON response exceeded max size %i - %s %s",
+            request.txn_id,
+            request.destination,
+            MAX_RESPONSE_SIZE,
+            request.method,
+            request.uri.decode("ascii"),
+        )
+        raise RequestSendFailed(e, can_retry=False) from e
     except ValueError as e:
         # The JSON content was invalid.
         logger.warning(
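The replacement for `treq.text_content` is a three-step pipeline: stream the body into a `StringIO` through `BinaryIOWrapper` (defined in the next hunk) while `read_body_with_max_size` enforces `MAX_RESPONSE_SIZE`, then decode JSON from the accumulated text once the deferred fires. A synchronous stand-in that shows the same shape without the Twisted plumbing (illustrative only, not the actual client code):

```python
import json
from io import StringIO

MAX_RESPONSE_SIZE = 100 * 1024 * 1024  # the same 100 MiB cap as in the hunk above


class BodyExceededMaxSize(Exception):
    pass


def handle_json_body(chunks, max_size=MAX_RESPONSE_SIZE):
    """Accumulate decoded text chunks up to max_size, then parse the result as JSON."""
    buf = StringIO()
    received = 0
    for chunk in chunks:
        received += len(chunk)
        if received > max_size:
            raise BodyExceededMaxSize()
        buf.write(chunk)
    return json.loads(buf.getvalue())


print(handle_json_body(['{"ro', 'om_id": "!abc"}']))  # {'room_id': '!abc'}
```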
@@ -218,6 +237,18 @@ async def _handle_json_response(
     return body
 
 
+class BinaryIOWrapper:
+    """A wrapper for a TextIO which converts from bytes on the fly."""
+
+    def __init__(self, file: typing.TextIO, encoding="utf-8", errors="strict"):
+        self.decoder = codecs.getincrementaldecoder(encoding)(errors)
+        self.file = file
+
+    def write(self, b: Union[bytes, bytearray]) -> int:
+        self.file.write(self.decoder.decode(b))
+        return len(b)
+
+
 class MatrixFederationHttpClient:
     """HTTP client used to talk to other homeservers over the federation
     protocol. Send client certificates and signs requests.
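`BinaryIOWrapper` exists because `read_body_with_max_size` delivers bytes while the JSON decoder wants text; the incremental decoder keeps partial multi-byte UTF-8 sequences buffered until the rest arrives, so network chunk boundaries can fall anywhere. A standalone demonstration of that behaviour (re-stating the class from the hunk so the snippet runs on its own):

```python
import codecs
from io import StringIO
from typing import Union


class BinaryIOWrapper:
    """Feed bytes in, get correctly decoded text out, even across chunk boundaries."""

    def __init__(self, file, encoding="utf-8", errors="strict"):
        self.decoder = codecs.getincrementaldecoder(encoding)(errors)
        self.file = file

    def write(self, b: Union[bytes, bytearray]) -> int:
        self.file.write(self.decoder.decode(b))
        return len(b)


buf = StringIO()
wrapper = BinaryIOWrapper(buf)
# "é" is the two-byte sequence 0xC3 0xA9 in UTF-8; split it across two writes.
wrapper.write(b'caf\xc3')
wrapper.write(b'\xa9 "ok"')
print(buf.getvalue())  # café "ok"
```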
@@ -14,13 +14,14 @@
 import contextlib
 import logging
 import time
-from typing import Optional, Tuple, Type, Union
+from typing import Optional, Tuple, Union
 
 import attr
 from zope.interface import implementer
 
-from twisted.internet.interfaces import IAddress
+from twisted.internet.interfaces import IAddress, IReactorTime
 from twisted.python.failure import Failure
 from twisted.web.resource import IResource
 from twisted.web.server import Request, Site
 
 from synapse.config.server import ListenerConfig
@@ -49,6 +50,7 @@ class SynapseRequest(Request):
     * Redaction of access_token query-params in __repr__
     * Logging at start and end
     * Metrics to record CPU, wallclock and DB time by endpoint.
+    * A limit to the size of request which will be accepted
 
     It also provides a method `processing`, which returns a context manager. If this
     method is called, the request won't be logged until the context manager is closed;
@@ -59,8 +61,9 @@ class SynapseRequest(Request):
         logcontext: the log context for this request
     """
 
-    def __init__(self, channel, *args, **kw):
+    def __init__(self, channel, *args, max_request_body_size=1024, **kw):
         Request.__init__(self, channel, *args, **kw)
+        self._max_request_body_size = max_request_body_size
         self.site = channel.site  # type: SynapseSite
         self._channel = channel  # this is used by the tests
         self.start_time = 0.0
@@ -97,6 +100,18 @@ class SynapseRequest(Request):
             self.site.site_tag,
         )
 
+    def handleContentChunk(self, data):
+        # we should have a `content` by now.
+        assert self.content, "handleContentChunk() called before gotLength()"
+        if self.content.tell() + len(data) > self._max_request_body_size:
+            logger.warning(
+                "Aborting connection from %s because the request exceeds maximum size",
+                self.client,
+            )
+            self.transport.abortConnection()
+            return
+        super().handleContentChunk(data)
+
     @property
     def requester(self) -> Optional[Union[Requester, str]]:
         return self._requester
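`handleContentChunk` is invoked by Twisted for each piece of request body; `self.content` is the buffer the earlier chunks were written into, so `self.content.tell() + len(data)` is the size the body would reach if this chunk were accepted. The connection is aborted outright because, mid-upload, there is no clean way to return an error response. The arithmetic in isolation (a toy check, not Synapse code):

```python
from io import BytesIO


def would_exceed(content: BytesIO, chunk: bytes, max_request_body_size: int) -> bool:
    """True if accepting this chunk would push the body past the configured limit."""
    return content.tell() + len(chunk) > max_request_body_size


body = BytesIO()
body.write(b"x" * 900)                        # 900 bytes already received
print(would_exceed(body, b"y" * 200, 1024))   # True:  900 + 200 > 1024
print(would_exceed(body, b"y" * 100, 1024))   # False: 900 + 100 <= 1024
```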
@@ -485,29 +500,55 @@ class _XForwardedForAddress:
 
 class SynapseSite(Site):
     """
-    Subclass of a twisted http Site that does access logging with python's
-    standard logging
+    Synapse-specific twisted http Site
+
+    This does two main things.
+
+    First, it replaces the requestFactory in use so that we build SynapseRequests
+    instead of regular t.w.server.Requests. All of the constructor params are really
+    just parameters for SynapseRequest.
+
+    Second, it inhibits the log() method called by Request.finish, since SynapseRequest
+    does its own logging.
     """
 
     def __init__(
         self,
-        logger_name,
-        site_tag,
+        logger_name: str,
+        site_tag: str,
         config: ListenerConfig,
-        resource,
+        resource: IResource,
         server_version_string,
-        *args,
-        **kwargs,
+        max_request_body_size: int,
+        reactor: IReactorTime,
     ):
-        Site.__init__(self, resource, *args, **kwargs)
+        """
+
+        Args:
+            logger_name: The name of the logger to use for access logs.
+            site_tag: A tag to use for this site - mostly in access logs.
+            config: Configuration for the HTTP listener corresponding to this site
+            resource: The base of the resource tree to be used for serving requests on
+                this site
+            server_version_string: A string to present for the Server header
+            max_request_body_size: Maximum request body length to allow before
+                dropping the connection
+            reactor: reactor to be used to manage connection timeouts
+        """
+        Site.__init__(self, resource, reactor=reactor)
 
         self.site_tag = site_tag
 
         assert config.http_options is not None
         proxied = config.http_options.x_forwarded
-        self.requestFactory = (
-            XForwardedForRequest if proxied else SynapseRequest
-        )  # type: Type[Request]
+        request_class = XForwardedForRequest if proxied else SynapseRequest
+
+        def request_factory(channel, queued) -> Request:
+            return request_class(
+                channel, max_request_body_size=max_request_body_size, queued=queued
+            )
+
+        self.requestFactory = request_factory  # type: ignore
         self.access_logger = logging.getLogger(logger_name)
         self.server_version_string = server_version_string.encode("ascii")
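Twisted's `Site` calls its `requestFactory` with only `(channel, queued)`, so the new `max_request_body_size` argument cannot be passed through directly; the hunk instead closes over it in a local `request_factory` function and installs that as the factory. The same closure pattern in isolation (all names here are hypothetical, standard library only):

```python
def make_request_factory(request_class, max_request_body_size: int):
    """Return a factory with Twisted's (channel, queued) call shape that forwards
    an extra keyword argument to the real request class."""

    def request_factory(channel, queued):
        return request_class(
            channel, max_request_body_size=max_request_body_size, queued=queued
        )

    return request_factory


class FakeRequest:
    def __init__(self, channel, *, max_request_body_size, queued):
        self.channel = channel
        self.max_request_body_size = max_request_body_size
        self.queued = queued


factory = make_request_factory(FakeRequest, max_request_body_size=1024)
request = factory(object(), False)
print(request.max_request_body_size)  # 1024
```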
@@ -12,8 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# These are imported to allow for nicer logging configuration files.
+import logging
+
 from synapse.logging._remote import RemoteHandler
 from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter
 
+# These are imported to allow for nicer logging configuration files.
 __all__ = ["RemoteHandler", "JsonFormatter", "TerseJsonFormatter"]
+
+# Debug logger for https://github.com/matrix-org/synapse/issues/9533 etc
+issue9533_logger = logging.getLogger("synapse.9533_debug")
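Giving the issue-9533 debugging its own logger name, `synapse.9533_debug`, means it can be switched on independently of the rest of Synapse's logging. For instance (a minimal sketch using the standard `logging` module; the other logger name is a placeholder, and a real deployment would normally do this via its logging config file):

```python
import logging

logging.basicConfig(level=logging.INFO)

# Turn on just the targeted debug output; everything else stays at INFO.
logging.getLogger("synapse.9533_debug").setLevel(logging.DEBUG)

logging.getLogger("synapse.9533_debug").debug("emitted")
logging.getLogger("synapse.federation").debug("still suppressed")
```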
Some files were not shown because too many files have changed in this diff.