Compare commits
v1.35.0...anoa/fix_f (1 commit)

Commit: 50adefae98
.buildkite/postgres-config.yaml

@@ -3,7 +3,7 @@
 # CI's Docker setup at the point where this file is considered.
 server_name: "localhost:8800"

-signing_key_path: ".buildkite/test.signing.key"
+signing_key_path: "/src/.buildkite/test.signing.key"

 report_stats: false

@@ -16,4 +16,6 @@ database:
     database: synapse

 # Suppress the key server warning.
-trusted_key_servers: []
+trusted_key_servers:
+  - server_name: "matrix.org"
+    suppress_key_server_warning: true

36  .buildkite/scripts/create_postgres_db.py  (new executable file)

@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.storage.engines import create_engine
+
+logger = logging.getLogger("create_postgres_db")
+
+if __name__ == "__main__":
+    # Create a PostgresEngine.
+    db_engine = create_engine({"name": "psycopg2", "args": {}})
+
+    # Connect to postgres to create the base database.
+    # We use "postgres" as a database because it's bound to exist and the "synapse" one
+    # doesn't exist yet.
+    db_conn = db_engine.module.connect(
+        user="postgres", host="postgres", password="postgres", dbname="postgres"
+    )
+    db_conn.autocommit = True
+    cur = db_conn.cursor()
+    cur.execute("CREATE DATABASE synapse;")
+    cur.close()
+    db_conn.close()
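The new helper takes no arguments; CI invokes it directly, as the test-script change further below shows:

```sh
./.buildkite/scripts/create_postgres_db.py
```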

.buildkite/scripts/postgres_exec.py  (deleted file)

@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-import psycopg2
-
-# a very simple replacement for `psql`, to make up for the lack of the postgres client
-# libraries in the synapse docker image.
-
-# We use "postgres" as a database because it's bound to exist and the "synapse" one
-# doesn't exist yet.
-db_conn = psycopg2.connect(
-    user="postgres", host="postgres", password="postgres", dbname="postgres"
-)
-db_conn.autocommit = True
-cur = db_conn.cursor()
-for c in sys.argv[1:]:
-    cur.execute(c)
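For comparison, the deleted helper executed arbitrary SQL statements passed on the command line, as in the invocations removed from the test script below:

```sh
./.buildkite/scripts/postgres_exec.py "DROP DATABASE synapse" "CREATE DATABASE synapse"
```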

.buildkite/scripts/test_synapse_port_db.sh

@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
-#
-# Test script for 'synapse_port_db'.
-# - sets up synapse and deps
-# - runs the port script on a prepopulated test sqlite db
-# - also runs it against a new sqlite db
+# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
+# with additional dependencies needed for the test (such as coverage or the PostgreSQL
+# driver), updates the schema of the test SQLite database and runs background updates on it,
+# creates an empty test database in PostgreSQL, then runs the 'synapse_port_db' script to
+# test porting the SQLite database to the PostgreSQL database (with coverage).

 set -xe
 cd `dirname $0`/../..

@@ -22,36 +22,15 @@ echo "--- Generate the signing key"
 # Generate the server's signing key.
 python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml

-echo "--- Prepare test database"
+echo "--- Prepare the databases"

 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
 scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml

 # Create the PostgreSQL database.
-./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
+./.buildkite/scripts/create_postgres_db.py

-echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
-
-# We should be able to run twice against the same database.
-echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
-
-#####
-
-# Now do the same again, on an empty database.
-
-echo "--- Prepare empty SQLite database"
-
-# we do this by deleting the sqlite db, and then doing the same again.
-rm .buildkite/test_db.db
-
-scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
-
-# re-create the PostgreSQL database.
-./.buildkite/scripts/postgres_exec.py \
-    "DROP DATABASE synapse" \
-    "CREATE DATABASE synapse"
-
-echo "+++ Run synapse_port_db against empty database"
+echo "+++ Run synapse_port_db"

 # Run the script
 coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
.buildkite/sqlite-config.yaml

@@ -3,7 +3,7 @@
 # schema and run background updates on it.
 server_name: "localhost:8800"

-signing_key_path: ".buildkite/test.signing.key"
+signing_key_path: "/src/.buildkite/test.signing.key"

 report_stats: false

@@ -13,4 +13,6 @@ database:
     database: ".buildkite/test_db.db"

 # Suppress the key server warning.
-trusted_key_servers: []
+trusted_key_servers:
+  - server_name: "matrix.org"
+    suppress_key_server_warning: true
2  .github/workflows/tests.yml  (vendored)

@@ -273,7 +273,7 @@ jobs:
         python-version: ${{ matrix.python-version }}
     - name: Patch Buildkite-specific test scripts
      run: |
-        sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
+        sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py
         sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
         sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
         sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
274  CHANGES.md

@@ -1,267 +1,3 @@
-Synapse 1.35.0 (2021-06-01)
-===========================
-
-Note that [the tag](https://github.com/matrix-org/synapse/releases/tag/v1.35.0rc3) and [docker images](https://hub.docker.com/layers/matrixdotorg/synapse/v1.35.0rc3/images/sha256-34ccc87bd99a17e2cbc0902e678b5937d16bdc1991ead097eee6096481ecf2c4?context=explore) for `v1.35.0rc3` were incorrectly built. If you are experiencing issues with either, it is recommended to upgrade to the equivalent tag or docker image for the `v1.35.0` release.
-
-Deprecations and Removals
--------------------------
-
-- The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. ([\#10101](https://github.com/matrix-org/synapse/issues/10101))
-
-Bugfixes
---------
-
-- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. Introduced in v1.33.0. ([\#10082](https://github.com/matrix-org/synapse/issues/10082))
-- Fix HTTP response size limit to allow joining very large rooms over federation. Introduced in v1.33.0. ([\#10093](https://github.com/matrix-org/synapse/issues/10093))
-
-
-Internal Changes
-----------------
-
-- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091))
-
-
-Synapse 1.35.0rc2 (2021-05-27)
-==============================
-
-Bugfixes
---------
-
-- Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. ([\#10079](https://github.com/matrix-org/synapse/issues/10079))
-
-
-Synapse 1.35.0rc1 (2021-05-25)
-==============================
-
-Features
---------
-
-- Add experimental support to allow a user who could join a restricted room to view it in the spaces summary. ([\#9922](https://github.com/matrix-org/synapse/issues/9922), [\#10007](https://github.com/matrix-org/synapse/issues/10007), [\#10038](https://github.com/matrix-org/synapse/issues/10038))
-- Reduce memory usage when joining very large rooms over federation. ([\#9958](https://github.com/matrix-org/synapse/issues/9958))
-- Add a configuration option which allows enabling opentracing by user id. ([\#9978](https://github.com/matrix-org/synapse/issues/9978))
-- Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. ([\#10011](https://github.com/matrix-org/synapse/issues/10011))
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. ([\#9991](https://github.com/matrix-org/synapse/issues/9991))
-- Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. ([\#9995](https://github.com/matrix-org/synapse/issues/9995))
-- Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. ([\#10002](https://github.com/matrix-org/synapse/issues/10002))
-- Fixed deletion of new presence stream states from database. ([\#10014](https://github.com/matrix-org/synapse/issues/10014), [\#10033](https://github.com/matrix-org/synapse/issues/10033))
-- Fixed a bug with very high resolution image uploads throwing internal server errors. ([\#10029](https://github.com/matrix-org/synapse/issues/10029))
-
-
-Updates to the Docker image
----------------------------
-
-- Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. ([\#10045](https://github.com/matrix-org/synapse/issues/10045))
-
-
-Improved Documentation
-----------------------
-
-- Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. ([\#9803](https://github.com/matrix-org/synapse/issues/9803))
-- Clarify documentation around SSO mapping providers generating unique IDs and localparts. ([\#9980](https://github.com/matrix-org/synapse/issues/9980))
-- Updates to the PostgreSQL documentation (`postgres.md`). ([\#9988](https://github.com/matrix-org/synapse/issues/9988), [\#9989](https://github.com/matrix-org/synapse/issues/9989))
-- Fix broken link in user directory documentation. Contributed by @junquera. ([\#10016](https://github.com/matrix-org/synapse/issues/10016))
-- Add missing room state entry to the table of contents of room admin API. ([\#10043](https://github.com/matrix-org/synapse/issues/10043))
-
-
-Deprecations and Removals
--------------------------
-
-- Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. ([\#9280](https://github.com/matrix-org/synapse/issues/9280))
-
-
-Internal Changes
-----------------
-
-- Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. ([\#9823](https://github.com/matrix-org/synapse/issues/9823))
-- Update comments in the space summary handler. ([\#9974](https://github.com/matrix-org/synapse/issues/9974))
-- Minor enhancements to the `@cachedList` descriptor. ([\#9975](https://github.com/matrix-org/synapse/issues/9975))
-- Split multipart email sending into a dedicated handler. ([\#9977](https://github.com/matrix-org/synapse/issues/9977))
-- Run `black` on files in the `scripts` directory. ([\#9981](https://github.com/matrix-org/synapse/issues/9981))
-- Add missing type hints to `synapse.util` module. ([\#9982](https://github.com/matrix-org/synapse/issues/9982))
-- Simplify a few helper functions. ([\#9984](https://github.com/matrix-org/synapse/issues/9984), [\#9985](https://github.com/matrix-org/synapse/issues/9985), [\#9986](https://github.com/matrix-org/synapse/issues/9986))
-- Remove unnecessary property from SQLBaseStore. ([\#9987](https://github.com/matrix-org/synapse/issues/9987))
-- Remove `keylen` param on `LruCache`. ([\#9993](https://github.com/matrix-org/synapse/issues/9993))
-- Update the Grafana dashboard in `contrib/`. ([\#10001](https://github.com/matrix-org/synapse/issues/10001))
-- Add a batching queue implementation. ([\#10017](https://github.com/matrix-org/synapse/issues/10017))
-- Reduce memory usage when verifying signatures on large numbers of events at once. ([\#10018](https://github.com/matrix-org/synapse/issues/10018))
-- Properly invalidate caches for destination retry timings every time (instead of expiring entries every 5 minutes). ([\#10036](https://github.com/matrix-org/synapse/issues/10036))
-- Fix running complement tests with Synapse workers. ([\#10039](https://github.com/matrix-org/synapse/issues/10039))
-- Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. ([\#10050](https://github.com/matrix-org/synapse/issues/10050))
-
-
-Synapse 1.34.0 (2021-05-17)
-===========================
-
-This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting.
-
-This release also deprecates the `POST /_synapse/admin/v1/rooms/<room_id>/delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/<room_id>` route instead.
-
-
-No significant changes since v1.34.0rc1.
-
-
-Synapse 1.34.0rc1 (2021-05-12)
-==============================
-
-Features
---------
-
-- Add experimental option to track memory usage of the caches. ([\#9881](https://github.com/matrix-org/synapse/issues/9881))
-- Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
-- Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902))
-- Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951))
-- Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916))
-- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
-- Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935))
-- Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945))
-- Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. ([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954))
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. ([\#9895](https://github.com/matrix-org/synapse/issues/9895))
-- Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
-- Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. ([\#9910](https://github.com/matrix-org/synapse/issues/9910))
-- Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. ([\#9928](https://github.com/matrix-org/synapse/issues/9928))
-- Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. ([\#9930](https://github.com/matrix-org/synapse/issues/9930))
-- Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. ([\#9961](https://github.com/matrix-org/synapse/issues/9961), [\#9965](https://github.com/matrix-org/synapse/issues/9965))
-- Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. ([\#9968](https://github.com/matrix-org/synapse/issues/9968))
-
-
-Updates to the Docker image
----------------------------
-
-- Add `startup_delay` to docker healthcheck to reduce waiting time for coming online and update the documentation with extra options. Contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913))
-
-
-Improved Documentation
-----------------------
-
-- Add `port` argument to the Postgres database sample config section. ([\#9911](https://github.com/matrix-org/synapse/issues/9911))
-
-
-Deprecations and Removals
--------------------------
-
-- Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
-
-
-Internal Changes
-----------------
-
-- Reduce the length of Synapse's access tokens. ([\#5588](https://github.com/matrix-org/synapse/issues/5588))
-- Export jemalloc stats to Prometheus if it is being used. ([\#9882](https://github.com/matrix-org/synapse/issues/9882))
-- Add type hints to presence handler. ([\#9885](https://github.com/matrix-org/synapse/issues/9885))
-- Reduce memory usage of the LRU caches. ([\#9886](https://github.com/matrix-org/synapse/issues/9886))
-- Add type hints to the `synapse.handlers` module. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
-- Time response time for external cache requests. ([\#9904](https://github.com/matrix-org/synapse/issues/9904))
-- Minor fixes to the `make_full_schema.sh` script. ([\#9931](https://github.com/matrix-org/synapse/issues/9931))
-- Move database schema files into a common directory. ([\#9932](https://github.com/matrix-org/synapse/issues/9932))
-- Add debug logging for lost/delayed to-device messages. ([\#9959](https://github.com/matrix-org/synapse/issues/9959))
-
-
-Synapse 1.33.2 (2021-05-11)
-===========================
-
-Due to the security issue highlighted below, server administrators are encouraged to update Synapse. We are not aware of these vulnerabilities being exploited in the wild.
-
-Security advisory
------------------
-
-This release fixes a denial of service attack ([CVE-2021-29471](https://github.com/matrix-org/synapse/security/advisories/GHSA-x345-32rc-8h85)) against Synapse's push rules implementation. Server admins are encouraged to upgrade.
-
-Internal Changes
-----------------
-
-- Unpin attrs dependency. ([\#9946](https://github.com/matrix-org/synapse/issues/9946))
-
-
-Synapse 1.33.1 (2021-05-06)
-===========================
-
-Bugfixes
---------
-
-- Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. ([\#9937](https://github.com/matrix-org/synapse/issues/9937))
-
-
-Synapse 1.33.0 (2021-05-05)
-===========================
-
-Features
---------
-
-- Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). ([\#9909](https://github.com/matrix-org/synapse/issues/9909))
-
-
-Synapse 1.33.0rc2 (2021-04-29)
-==============================
-
-Bugfixes
---------
-
-- Fix tight loop when handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900))
-
-
-Synapse 1.33.0rc1 (2021-04-28)
-==============================
-
-Features
---------
-
-- Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9800](https://github.com/matrix-org/synapse/issues/9800), [\#9814](https://github.com/matrix-org/synapse/issues/9814))
-- Add experimental support for handling presence on a worker. ([\#9819](https://github.com/matrix-org/synapse/issues/9819), [\#9820](https://github.com/matrix-org/synapse/issues/9820), [\#9828](https://github.com/matrix-org/synapse/issues/9828), [\#9850](https://github.com/matrix-org/synapse/issues/9850))
-- Return a new template when a user attempts to renew their account multiple times with the same token, stating that their account is set to expire. This replaces the invalid token template that would previously be shown in this case. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832))
-
-
-Bugfixes
---------
-
-- Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. ([\#9726](https://github.com/matrix-org/synapse/issues/9726))
-- Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. ([\#9788](https://github.com/matrix-org/synapse/issues/9788))
-- Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. ([\#9802](https://github.com/matrix-org/synapse/issues/9802))
-- Limit the size of HTTP responses read over federation. ([\#9833](https://github.com/matrix-org/synapse/issues/9833))
-- Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. ([\#9867](https://github.com/matrix-org/synapse/issues/9867))
-- Fix a long-standing bug where errors from federation did not propagate to the client. ([\#9868](https://github.com/matrix-org/synapse/issues/9868))
-
-
-Improved Documentation
-----------------------
-
-- Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. ([\#9801](https://github.com/matrix-org/synapse/issues/9801))
-
-
-Internal Changes
-----------------
-
-- Add a dockerfile for running Synapse in worker-mode under Complement. ([\#9162](https://github.com/matrix-org/synapse/issues/9162))
-- Apply `pyupgrade` across the codebase. ([\#9786](https://github.com/matrix-org/synapse/issues/9786))
-- Move some replication processing out of `generic_worker`. ([\#9796](https://github.com/matrix-org/synapse/issues/9796))
-- Replace `HomeServer.get_config()` with inline references. ([\#9815](https://github.com/matrix-org/synapse/issues/9815))
-- Rename some handlers and config modules to not duplicate the top-level module. ([\#9816](https://github.com/matrix-org/synapse/issues/9816))
-- Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. ([\#9817](https://github.com/matrix-org/synapse/issues/9817))
-- Reduce CPU usage of the user directory by reusing existing calculated room membership. ([\#9821](https://github.com/matrix-org/synapse/issues/9821))
-- Small speed up for joining large remote rooms. ([\#9825](https://github.com/matrix-org/synapse/issues/9825))
-- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9838](https://github.com/matrix-org/synapse/issues/9838))
-- Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. ([\#9845](https://github.com/matrix-org/synapse/issues/9845))
-- Limit length of accepted email addresses. ([\#9855](https://github.com/matrix-org/synapse/issues/9855))
-- Remove redundant `synapse.types.Collection` type definition. ([\#9856](https://github.com/matrix-org/synapse/issues/9856))
-- Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. ([\#9858](https://github.com/matrix-org/synapse/issues/9858))
-- Disable invite rate-limiting by default when running the unit tests. ([\#9871](https://github.com/matrix-org/synapse/issues/9871))
-- Pass a reactor into `SynapseSite` to make testing easier. ([\#9874](https://github.com/matrix-org/synapse/issues/9874))
-- Make `DomainSpecificString` an `attrs` class. ([\#9875](https://github.com/matrix-org/synapse/issues/9875))
-- Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. ([\#9876](https://github.com/matrix-org/synapse/issues/9876))
-- Remove redundant `_PushHTTPChannel` test class. ([\#9878](https://github.com/matrix-org/synapse/issues/9878))
-- Remove backwards-compatibility code for Python versions < 3.6. ([\#9879](https://github.com/matrix-org/synapse/issues/9879))
-- Small performance improvement around handling new local presence updates. ([\#9887](https://github.com/matrix-org/synapse/issues/9887))
-
-
 Synapse 1.32.2 (2021-04-22)
 ===========================

@@ -277,7 +13,7 @@ Synapse 1.32.1 (2021-04-21)
 ===========================

 This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853)
 in Synapse 1.32.0 that caused connected Prometheus instances to become unstable.

 However, as this release is still subject to the `LoggingContext` change in 1.32.0,
 it is recommended to remain on or downgrade to 1.31.0.

@@ -293,11 +29,11 @@ Synapse 1.32.0 (2021-04-20)

 **Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853)
 that can overwhelm connected Prometheus instances. This issue was not present in
 1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and
 follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183)
 to clean up any excess writeahead logs.

 **Note:** This release also mistakenly included a change that may have affected Synapse
 modules that import `synapse.logging.context.LoggingContext`, such as
 [synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider).
 This will be fixed in a later Synapse version.

@@ -308,8 +44,8 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/<user_id>` adm

 This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.

 If you are using the `packages.matrix.org` Debian repository for Synapse packages,
 note that we have recently updated the expiry date on the gpg signing key. If you see an
 error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you
 will need to get a fresh copy of the keys. You can do so with:
26  UPGRADE.rst

@@ -85,32 +85,6 @@ for example:

     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

-Upgrading to v1.34.0
-====================
-
-``room_invite_state_types`` configuration setting
--------------------------------------------------
-
-The ``room_invite_state_types`` configuration setting has been deprecated and
-replaced with ``room_prejoin_state``. See the `sample configuration file <https://github.com/matrix-org/synapse/blob/v1.34.0/docs/sample_config.yaml#L1515>`_.
-
-If you have set ``room_invite_state_types`` to the default value you should simply
-remove it from your configuration file. The default value used to be:
-
-.. code:: yaml
-
-   room_invite_state_types:
-      - "m.room.join_rules"
-      - "m.room.canonical_alias"
-      - "m.room.avatar"
-      - "m.room.encryption"
-      - "m.room.name"
-
-If you have customised this value, you should remove ``room_invite_state_types`` and
-configure ``room_prejoin_state`` instead.
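For illustration only, a customised value like the default above could be carried over to the new setting along these lines (a sketch; ``disable_default_event_types`` and ``additional_event_types`` are the keys documented in the sample configuration file linked above, which should be consulted for the exact schema):

.. code:: yaml

   room_prejoin_state:
      disable_default_event_types: true
      additional_event_types:
         - "m.room.join_rules"
         - "m.room.canonical_alias"
         - "m.room.avatar"
         - "m.room.encryption"
         - "m.room.name"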

 Upgrading to v1.33.0
 ====================

1  changelog.d/9162.misc  (new file)
@@ -0,0 +1 @@
+Add a dockerfile for running Synapse in worker-mode under Complement.

1  changelog.d/9702.misc  (new file)
@@ -0,0 +1 @@
+Speed up federation transmission by using fewer database calls. Contributed by @ShadowJonathan.

1  changelog.d/9726.bugfix  (new file)
@@ -0,0 +1 @@
+Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path.

1  changelog.d/9786.misc  (new file)
@@ -0,0 +1 @@
+Apply `pyupgrade` across the codebase.

1  changelog.d/9788.bugfix  (new file)
@@ -0,0 +1 @@
+Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg.

1  changelog.d/9796.misc  (new file)
@@ -0,0 +1 @@
+Move some replication processing out of `generic_worker`.

1  changelog.d/9800.feature  (new file)
@@ -0,0 +1 @@
+Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership.

1  changelog.d/9801.doc  (new file)
@@ -0,0 +1 @@
+Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms.

1  changelog.d/9802.bugfix  (new file)
@@ -0,0 +1 @@
+Add some sanity checks to identity server passed to 3PID bind/unbind endpoints.

1  changelog.d/9814.feature  (new file)
@@ -0,0 +1 @@
+Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership.

1  changelog.d/9815.misc  (new file)
@@ -0,0 +1 @@
+Replace `HomeServer.get_config()` with inline references.

1  changelog.d/9816.misc  (new file)
@@ -0,0 +1 @@
+Rename some handlers and config modules to not duplicate the top-level module.

1  changelog.d/9817.misc  (new file)
@@ -0,0 +1 @@
+Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced.

1  changelog.d/9819.feature  (new file)
@@ -0,0 +1 @@
+Add experimental support for handling presence on a worker.

1  changelog.d/9820.feature  (new file)
@@ -0,0 +1 @@
+Add experimental support for handling presence on a worker.

1  changelog.d/9821.misc  (new file)
@@ -0,0 +1 @@
+Reduce CPU usage of the user directory by reusing existing calculated room membership.

1  changelog.d/9825.misc  (new file)
@@ -0,0 +1 @@
+Small speed up for joining large remote rooms.

1  changelog.d/9828.feature  (new file)
@@ -0,0 +1 @@
+Add experimental support for handling presence on a worker.

1  changelog.d/9832.feature  (new file)
@@ -0,0 +1 @@
+Don't return an error when a user attempts to renew their account multiple times with the same token. Instead, state when their account is set to expire. This change concerns the optional account validity feature.

1  changelog.d/9833.bugfix  (new file)
@@ -0,0 +1 @@
+Limit the size of HTTP responses read over federation.

1  changelog.d/9838.misc  (new file)
@@ -0,0 +1 @@
+Introduce flake8-bugbear to the test suite and fix some of its lint violations.

1  changelog.d/9845.misc  (new file)
@@ -0,0 +1 @@
+Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores.

1  changelog.d/9850.feature  (new file)
@@ -0,0 +1 @@
+Add experimental support for handling presence on a worker.

1  changelog.d/9855.misc  (new file)
@@ -0,0 +1 @@
+Limit length of accepted email addresses.

1  changelog.d/9856.misc  (new file)
@@ -0,0 +1 @@
+Remove redundant `synapse.types.Collection` type definition.

1  changelog.d/9858.misc  (new file)
@@ -0,0 +1 @@
+Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts.

1  changelog.d/9867.bugfix  (new file)
@@ -0,0 +1 @@
+Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists.

1  changelog.d/9871.misc  (new file)
@@ -0,0 +1 @@
+Disable invite rate-limiting by default when running the unit tests.

1  changelog.d/9874.misc  (new file)
@@ -0,0 +1 @@
+Pass a reactor into `SynapseSite` to make testing easier.

1  changelog.d/9875.misc  (new file)
@@ -0,0 +1 @@
+Make `DomainSpecificString` an `attrs` class.

1  changelog.d/9876.misc  (new file)
@@ -0,0 +1 @@
+Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules.

1  changelog.d/9878.misc  (new file)
@@ -0,0 +1 @@
+Remove redundant `_PushHTTPChannel` test class.

1  changelog.d/9887.misc  (new file)
@@ -0,0 +1 @@
+Small performance improvement around handling new local presence updates.
@@ -224,14 +224,16 @@ class HomeServer(ReplicationHandler):
         destinations = yield self.get_servers_for_context(room_name)

         try:
-            yield self.replication_layer.send_pdu(
-                Pdu.create_new(
-                    context=room_name,
-                    pdu_type="sy.room.message",
-                    content={"sender": sender, "body": body},
-                    origin=self.server_name,
-                    destinations=destinations,
-                )
+            yield self.replication_layer.send_pdus(
+                [
+                    Pdu.create_new(
+                        context=room_name,
+                        pdu_type="sy.room.message",
+                        content={"sender": sender, "body": body},
+                        origin=self.server_name,
+                        destinations=destinations,
+                    )
+                ]
             )
         except Exception as e:
             logger.exception(e)

@@ -253,7 +255,7 @@ class HomeServer(ReplicationHandler):
                 origin=self.server_name,
                 destinations=destinations,
             )
-            yield self.replication_layer.send_pdu(pdu)
+            yield self.replication_layer.send_pdus([pdu])
         except Exception as e:
             logger.exception(e)

@@ -265,16 +267,18 @@ class HomeServer(ReplicationHandler):
         destinations = yield self.get_servers_for_context(room_name)

         try:
-            yield self.replication_layer.send_pdu(
-                Pdu.create_new(
-                    context=room_name,
-                    is_state=True,
-                    pdu_type="sy.room.member",
-                    state_key=invitee,
-                    content={"membership": "invite"},
-                    origin=self.server_name,
-                    destinations=destinations,
-                )
+            yield self.replication_layer.send_pdus(
+                [
+                    Pdu.create_new(
+                        context=room_name,
+                        is_state=True,
+                        pdu_type="sy.room.member",
+                        state_key=invitee,
+                        content={"membership": "invite"},
+                        origin=self.server_name,
+                        destinations=destinations,
+                    )
+                ]
             )
         except Exception as e:
             logger.exception(e)

(File diff suppressed because it is too large.)
@@ -1,71 +0,0 @@
-[Service]
-# The following directives give the synapse service R/W access to:
-# - /run/matrix-synapse
-# - /var/lib/matrix-synapse
-# - /var/log/matrix-synapse
-
-RuntimeDirectory=matrix-synapse
-StateDirectory=matrix-synapse
-LogsDirectory=matrix-synapse
-
-######################
-## Security Sandbox ##
-######################
-
-# Make sure that the service has its own unshared tmpfs at /tmp and that it
-# cannot see or change any real devices
-PrivateTmp=true
-PrivateDevices=true
-
-# We give no capabilities to a service by default
-CapabilityBoundingSet=
-AmbientCapabilities=
-
-# Protect the following from modification:
-# - The entire filesystem
-# - sysctl settings and loaded kernel modules
-# - No modifications allowed to Control Groups
-# - Hostname
-# - System Clock
-ProtectSystem=strict
-ProtectKernelTunables=true
-ProtectKernelModules=true
-ProtectControlGroups=true
-ProtectClock=true
-ProtectHostname=true
-
-# Prevent access to the following:
-# - /home directory
-# - Kernel logs
-ProtectHome=tmpfs
-ProtectKernelLogs=true
-
-# Make sure that the process can only see PIDs and process details of itself,
-# and the second option disables seeing details of things like system load and
-# I/O etc
-ProtectProc=invisible
-ProcSubset=pid
-
-# While not needed, we set these options explicitly
-# - This process has been given access to the host network
-# - It can also communicate with any IP Address
-PrivateNetwork=false
-RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
-IPAddressAllow=any
-
-# Restrict system calls to a sane bunch
-SystemCallArchitectures=native
-SystemCallFilter=@system-service
-SystemCallFilter=~@privileged @resources @obsolete
-
-# Misc restrictions
-# - Since the process is a python process it needs to be able to write and
-#   execute memory regions, so we set MemoryDenyWriteExecute to false
-RestrictSUIDSGID=true
-RemoveIPC=true
-NoNewPrivileges=true
-RestrictRealtime=true
-RestrictNamespaces=true
-LockPersonality=true
-PrivateUsers=true
-MemoryDenyWriteExecute=false
30  debian/changelog  (vendored)

@@ -1,33 +1,3 @@
-matrix-synapse-py3 (1.35.0) stable; urgency=medium
-
-  * New synapse release 1.35.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Jun 2021 13:23:35 +0100
-
-matrix-synapse-py3 (1.34.0) stable; urgency=medium
-
-  * New synapse release 1.34.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Mon, 17 May 2021 11:34:18 +0100
-
-matrix-synapse-py3 (1.33.2) stable; urgency=medium
-
-  * New synapse release 1.33.2.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 11 May 2021 11:17:59 +0100
-
-matrix-synapse-py3 (1.33.1) stable; urgency=medium
-
-  * New synapse release 1.33.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 06 May 2021 14:06:33 +0100
-
-matrix-synapse-py3 (1.33.0) stable; urgency=medium
-
-  * New synapse release 1.33.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 05 May 2021 14:15:27 +0100
-
 matrix-synapse-py3 (1.32.2) stable; urgency=medium

   * New synapse release 1.32.2.

@@ -88,5 +88,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp

 ENTRYPOINT ["/start.py"]

-HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+HEALTHCHECK --interval=1m --timeout=5s \
     CMD curl -fSs http://localhost:8008/health || exit 1

@@ -191,16 +191,6 @@ whilst running the above `docker run` commands.
 ```
 --no-healthcheck
 ```

-## Disabling the healthcheck in docker-compose file
-
-If you wish to disable the healthcheck via docker-compose, append the following to your service configuration.
-
-```
-healthcheck:
-  disable: true
-```
-
 ## Setting custom healthcheck on docker run

 If you wish to point the healthcheck at a different port with docker command, add the following

@@ -212,15 +202,14 @@ If you wish to point the healthcheck at a different port with docker command, ad
 ## Setting the healthcheck in docker-compose file

 You can add the following to set a custom healthcheck in a docker compose file.
-You will need docker-compose version >2.1 for this to work.
+You will need version >2.1 for this to work.

 ```
 healthcheck:
   test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
-  interval: 15s
-  timeout: 5s
+  interval: 1m
+  timeout: 10s
   retries: 3
-  start_period: 5s
 ```

 ## Using jemalloc

@@ -9,11 +9,10 @@ formatters:
 {% endif %}

 handlers:
-{% if LOG_FILE_PATH %}
   file:
     class: logging.handlers.TimedRotatingFileHandler
     formatter: precise
-    filename: {{ LOG_FILE_PATH }}
+    filename: {{ LOG_FILE_PATH or "homeserver.log" }}
     when: "midnight"
     backupCount: 6  # Does not include the current log file.
     encoding: utf8

@@ -30,7 +29,6 @@ handlers:
     # be written to disk.
     capacity: 10
     flushLevel: 30  # Flush for WARNING logs as well
-{% endif %}

   console:
     class: logging.StreamHandler

@@ -4,7 +4,6 @@
 * [Usage](#usage)
   - [Room Details API](#room-details-api)
   - [Room Members API](#room-members-api)
-  - [Room State API](#room-state-api)
   - [Delete Room API](#delete-room-api)
     * [Parameters](#parameters-1)
     * [Response](#response)

@@ -428,7 +427,7 @@ the new room. Users on other servers will be unaffected.
 The API is:

 ```
-DELETE /_synapse/admin/v1/rooms/<room_id>
+POST /_synapse/admin/v1/rooms/<room_id>/delete
 ```

 with a body of:

@@ -529,15 +528,6 @@ You will have to manually handle, if you so choose, the following:

 * Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
 * Removal of the Content Violation room if desired.

-## Deprecated endpoint
-
-The previous deprecated API will be removed in a future release, it was:
-
-```
-POST /_synapse/admin/v1/rooms/<room_id>/delete
-```
-
-It behaves in the same way as the current endpoint, except for the path and the method.

 # Make Room Admin API

@@ -42,17 +42,17 @@ To receive OpenTracing spans, start up a Jaeger server. This can be done
 using docker like so:

 ```sh
-docker run -d --name jaeger \
+docker run -d --name jaeger
   -p 6831:6831/udp \
   -p 6832:6832/udp \
   -p 5778:5778 \
   -p 16686:16686 \
   -p 14268:14268 \
-  jaegertracing/all-in-one:1
+  jaegertracing/all-in-one:1.13
 ```

-Latest documentation is probably at
-https://www.jaegertracing.io/docs/latest/getting-started.
+<https://www.jaegertracing.io/docs/1.13/getting-started/>

 ## Enable OpenTracing in Synapse

@@ -62,7 +62,7 @@ as shown in the [sample config](./sample_config.yaml). For example:

 ```yaml
 opentracing:
-  enabled: true
+  tracer_enabled: true
   homeserver_whitelist:
     - "mytrustedhomeserver.org"
     - "*.myotherhomeservers.com"

@@ -90,4 +90,4 @@ to two problems, namely:
 ## Configuring Jaeger

 Sampling strategies can be set as in this document:
-<https://www.jaegertracing.io/docs/latest/sampling/>.
+<https://www.jaegertracing.io/docs/1.13/sampling/>
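For instance, a constant sampler that records every span can be configured through the homeserver's `opentracing` section (a sketch based on Synapse's sample configuration; see the linked Jaeger document for the other strategy types):

```yaml
opentracing:
  jaeger_config:
    sampler:
      type: const
      param: 1
```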
200  docs/postgres.md

@@ -1,6 +1,6 @@
 # Using Postgres

-Synapse supports PostgreSQL versions 9.6 or later.
+Postgres version 9.5 or later is known to work.

 ## Install postgres client libraries

@@ -33,15 +33,28 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
     # Or, if your system uses sudo to get administrative rights
     sudo -u postgres bash

-Then, create a postgres user and a database with:
+Then, create a user ``synapse_user`` with:

     # this will prompt for a password for the new user
     createuser --pwprompt synapse_user

-    createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
-
-The above will create a user called `synapse_user`, and a database called
-`synapse`.
+Before you can authenticate with the `synapse_user`, you must create a
+database that it can access. To create a database, first connect to the
+database with your database user:
+
+    su - postgres  # Or: sudo -u postgres bash
+    psql
+
+and then run:
+
+    CREATE DATABASE synapse
+     ENCODING 'UTF8'
+     LC_COLLATE='C'
+     LC_CTYPE='C'
+     template=template0
+     OWNER synapse_user;
+
+This would create an appropriate database named `synapse` owned by the
+`synapse_user` user (which must already have been created as above).

 Note that the PostgreSQL database *must* have the correct encoding set
 (as shown above), otherwise it will not be able to store UTF8 strings.

@@ -50,6 +63,79 @@ You may need to enable password authentication so `synapse_user` can
 connect to the database. See
 <https://www.postgresql.org/docs/current/auth-pg-hba-conf.html>.

+If you get an error along the lines of `FATAL: Ident authentication failed for
+user "synapse_user"`, you may need to use an authentication method other than
+`ident`:
+
+* If the `synapse_user` user has a password, add the password to the `database:`
+  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
+
+  ```
+  host    synapse     synapse_user    ::1/128     md5  # or `scram-sha-256` instead of `md5` if you use that
+  ```
+
+* If the `synapse_user` user does not have a password, then a password doesn't
+  have to be added to `homeserver.yaml`. But the following does need to be added
+  to `pg_hba.conf`:
+
+  ```
+  host    synapse     synapse_user    ::1/128     trust
+  ```
+
+Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
+new line, it is inserted before:
+
+```
+host    all         all             ::1/128     ident
+```
+
+### Fixing incorrect `COLLATE` or `CTYPE`
+
+Synapse will refuse to set up a new database if it has the wrong values of
+`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
+different locales can cause issues if the locale library is updated from
+underneath the database, or if a different version of the locale is used on any
+replicas.
+
+The safest way to fix the issue is to take a dump and recreate the database with
+the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the
+parameters on a live database and run a `REINDEX` on the entire database,
+however extreme care must be taken to avoid database corruption.
+
+Note that the above may fail with an error about duplicate rows if corruption
+has already occurred, and such duplicate rows will need to be manually removed.
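A dump-and-recreate pass might look like the following (a sketch only: the database and user names are illustrative, and Synapse must be stopped first):

```sh
sudo -u postgres pg_dump synapse > synapse_dump.sql
sudo -u postgres dropdb synapse
sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
sudo -u postgres psql synapse < synapse_dump.sql
```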
+
+## Fixing inconsistent sequences error
+
+Synapse uses Postgres sequences to generate IDs for various tables. A sequence
+and associated table can get out of sync if, for example, Synapse has been
+downgraded and then upgraded again.
+
+To fix the issue shut down Synapse (including any and all workers) and run the
+SQL command included in the error message. Once done Synapse should start
+successfully.
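The exact statement is printed by Synapse itself and should be copied from the error message; purely as an illustration of its shape (the sequence and table names here are hypothetical), it is typically a `setval` call:

```sql
SELECT setval('state_group_id_seq', (SELECT max(id) FROM state_groups));
```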
+
+## Tuning Postgres
+
+The default settings should be fine for most deployments. For larger
+scale deployments tuning some of the settings is recommended, details of
+which can be found at
+<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
+
+In particular, we've found tuning the following values helpful for
+performance:
+
+- `shared_buffers`
+- `effective_cache_size`
+- `work_mem`
+- `maintenance_work_mem`
+- `autovacuum_work_mem`
+
+Note that the appropriate values for those fields depend on the amount
+of free memory the database host has available.
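As a rough illustration only, a database host with on the order of 8 GB of free memory might use values in this ballpark in `postgresql.conf` (assumed numbers, not recommendations from this document; size them to your own hardware):

```
shared_buffers = 2GB
effective_cache_size = 6GB
work_mem = 32MB
maintenance_work_mem = 512MB
autovacuum_work_mem = 256MB
```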

 ## Synapse config

 When you are ready to start using PostgreSQL, edit the `database`

@@ -79,42 +165,18 @@ may block for an extended period while it waits for a response from the
 database server. Example values might be:

 ```yaml
 database:
   args:
     # ... as above

     # seconds of inactivity after which TCP should send a keepalive message to the server
     keepalives_idle: 10

     # the number of seconds after which a TCP keepalive message that is not
     # acknowledged by the server should be retransmitted
     keepalives_interval: 10

     # the number of TCP keepalives that can be lost before the client's connection
     # to the server is considered dead
     keepalives_count: 3
 ```

-## Tuning Postgres
-
-The default settings should be fine for most deployments. For larger
-scale deployments tuning some of the settings is recommended, details of
-which can be found at
-<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
-
-In particular, we've found tuning the following values helpful for
-performance:
-
-- `shared_buffers`
-- `effective_cache_size`
-- `work_mem`
-- `maintenance_work_mem`
-- `autovacuum_work_mem`
-
-Note that the appropriate values for those fields depend on the amount
-of free memory the database host has available.

 ## Porting from SQLite

 ### Overview

@@ -123,8 +185,9 @@ The script `synapse_port_db` allows porting an existing synapse server
 backed by SQLite to using PostgreSQL. This is done as a two-phase
 process:

-1. Copy the existing SQLite database to a separate location and run
-   the port script against that offline database.
+1. Copy the existing SQLite database to a separate location (while the
+   server is down) and run the port script against that offline
+   database.
 2. Shut down the server. Rerun the port script to port any data that
    has come in since taking the first snapshot. Restart server against
    the PostgreSQL database.
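Concretely, each phase boils down to an invocation of the port script with the same flags the CI script above uses; a sketch with illustrative paths:

```sh
# Phase 1 (server still running): snapshot the SQLite database and port it offline.
cp homeserver.db homeserver.db.snapshot
synapse_port_db --sqlite-database homeserver.db.snapshot --postgres-config homeserver-postgres.yaml

# Phase 2: stop Synapse, rerun against the now-offline database to port the data
# that arrived since the snapshot, then restart against PostgreSQL.
synapse_port_db --sqlite-database homeserver.db --postgres-config homeserver-postgres.yaml
```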
|
||||
@@ -182,60 +245,3 @@ PostgreSQL database configuration file `homeserver-postgres.yaml`:

./synctl start

Synapse should now be running against PostgreSQL.

## Troubleshooting

### Alternative auth methods

If you get an error along the lines of `FATAL: Ident authentication failed for
user "synapse_user"`, you may need to use an authentication method other than
`ident`:

* If the `synapse_user` user has a password, add the password to the `database:`
  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:

  ```
  host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that
  ```

* If the `synapse_user` user does not have a password, then a password doesn't
  have to be added to `homeserver.yaml`. But the following does need to be added
  to `pg_hba.conf`:

  ```
  host synapse synapse_user ::1/128 trust
  ```

Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
new line, it is inserted before:

```
host all all ::1/128 ident
```
### Fixing incorrect `COLLATE` or `CTYPE`

Synapse will refuse to set up a new database if it has the wrong values of
`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
different locales can cause issues if the locale library is updated from
underneath the database, or if a different version of the locale is used on any
replicas.

The safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database;
however, extreme care must be taken to avoid database corruption.

Note that the above may fail with an error about duplicate rows if corruption
has already occurred, and such duplicate rows will need to be manually removed.
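Before dumping and recreating, you can confirm what the database actually has with a query against `pg_database`; a minimal sketch (connection details and database name are assumptions):

```python
# Minimal sketch: report the COLLATE and CTYPE of the synapse database,
# which Synapse expects to be "C". Connection details are assumptions.
import psycopg2

conn = psycopg2.connect(
    user="synapse_user", password="secretpassword", host="localhost", dbname="postgres"
)
cur = conn.cursor()
cur.execute(
    "SELECT datcollate, datctype FROM pg_database WHERE datname = %s", ("synapse",)
)
collate, ctype = cur.fetchone()
print("COLLATE=%s CTYPE=%s" % (collate, ctype))
cur.close()
conn.close()
```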

### Fixing inconsistent sequences error

Synapse uses Postgres sequences to generate IDs for various tables. A sequence
and associated table can get out of sync if, for example, Synapse has been
downgraded and then upgraded again.

To fix the issue, shut down Synapse (including any and all workers) and run the
SQL command included in the error message. Once done, Synapse should start
successfully.
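A minimal sketch of applying such a statement with `psycopg2` while Synapse is stopped; the statement below is only a placeholder, so use exactly the SQL from your own error message:

```python
# Minimal sketch: run the corrective SQL from the error message.
# FIX_SQL is a placeholder; paste the statement Synapse logged.
import psycopg2

FIX_SQL = "ALTER SEQUENCE events_stream_seq RESTART WITH 100000"  # placeholder

conn = psycopg2.connect(
    user="synapse_user", password="secretpassword", host="localhost", dbname="synapse"
)
conn.autocommit = True
cur = conn.cursor()
cur.execute(FIX_SQL)
cur.close()
conn.close()
```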

@@ -28,11 +28,7 @@ async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None

which can be given a list of local or remote MXIDs to broadcast known, online user
presence to (for those users that the receiving user is considered interested in).
It does not include state for users who are currently offline, and it can only be
called on workers that support sending federation. Additionally, this method must
only be called from the process that has been configured to write to the
[presence stream](https://github.com/matrix-org/synapse/blob/master/docs/workers.md#stream-writers).
By default, this is the main process, but another worker can be configured to do
so.
called on workers that support sending federation.
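For illustration, a sketch of how a module might call this API (the user IDs are examples; `module_api` stands for the `ModuleApi` instance handed to the module):

```python
# Minimal sketch of calling the API from module code. The user IDs are
# examples; module_api is the ModuleApi instance given to the module.
from typing import Iterable


async def broadcast_presence(module_api, users: Iterable[str]) -> None:
    # Sends the current online presence of the given local/remote users to
    # the users interested in them. Offline users are skipped by the API.
    await module_api.send_local_online_presence_to(users)


# e.g. await broadcast_presence(module_api, ["@alice:example.org"])
```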

### Module structure


@@ -152,16 +152,6 @@ presence:
#
#gc_thresholds: [700, 10, 10]

# The minimum time in seconds between each GC for a generation, regardless of
# the GC thresholds. This ensures that we don't do GC too frequently.
#
# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
# generation 0 GCs, etc.
#
# Defaults to `[1s, 10s, 30s]`.
#
#gc_min_interval: [0.5s, 30s, 1m]

# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is 100. -1 means no upper limit.
#
@@ -683,6 +673,33 @@ acme:
#
account_key_file: DATADIR/acme_account.key

# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the loadbalancers to this list if they are different to the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# key. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]


## Federation ##

@@ -714,12 +731,6 @@ acme:
#
#allow_profile_lookup_over_federation: false

# Uncomment to disable device display name lookup over federation. By default, the
# Federation API allows other homeservers to obtain device display names of any user
# on this homeserver. Defaults to 'true'.
#
#allow_device_name_lookup_over_federation: false


## Caching ##

@@ -799,7 +810,6 @@ caches:
# password: secretpassword
# database: synapse
# host: localhost
# port: 5432
# cp_min: 5
# cp_max: 10
#

@@ -1494,7 +1504,6 @@ room_prejoin_state:
# - m.room.avatar
# - m.room.encryption
# - m.room.name
# - m.room.create
#
# Uncomment the following to disable these defaults (so that only the event
# types listed in 'additional_event_types' are shared). Defaults to 'false'.

@@ -2818,8 +2827,7 @@ opentracing:
#enabled: true

# The list of homeservers we wish to send and receive span contexts and span baggage.
# See docs/opentracing.rst.
#
# See docs/opentracing.rst
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
@@ -2828,26 +2836,19 @@ opentracing:
#homeserver_whitelist:
#  - ".*"

# A list of the matrix IDs of users whose requests will always be traced,
# even if the tracing system would otherwise drop the traces due to
# probabilistic sampling.
#
# By default, the list is empty.
#
#force_tracing_for_users:
#  - "@user1:server_name"
#  - "@user2:server_name"

# Jaeger can be configured to sample traces at different rates.
# All configuration options provided by Jaeger can be set here.
# Jaeger's configuration is mostly related to trace sampling which
# Jaeger's configuration mostly related to trace sampling which
# is documented here:
# https://www.jaegertracing.io/docs/latest/sampling/.
# https://www.jaegertracing.io/docs/1.13/sampling/.
#
#jaeger_config:
#  sampler:
#    type: const
#    param: 1

# Logging whether spans were started and reported
#
# logging:
#   false

@@ -2916,18 +2917,3 @@ redis:
# Optional password if configured on the Redis instance
#
#password: <secret_password>


# Enable experimental features in Synapse.
#
# Experimental features might break or be removed without a deprecation
# period.
#
experimental_features:
  # Support for Spaces (MSC1772), it enables the following:
  #
  # * The Spaces Summary API (MSC2946).
  # * Restricting room membership based on space membership (MSC3083).
  #
  # Uncomment to disable support for Spaces.
  #spaces_enabled: false
@@ -67,8 +67,8 @@ A custom mapping provider must specify the following methods:
  - Arguments:
    - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
      information from.
  - This method must return a string, which is the unique, immutable identifier
    for the user. Commonly the `sub` claim of the response.
  - This method must return a string, which is the unique identifier for the
    user. Commonly the ``sub`` claim of the response.
* `map_user_attributes(self, userinfo, token, failures)`
  - This method must be async.
  - Arguments:

@@ -87,9 +87,7 @@ A custom mapping provider must specify the following methods:
    `localpart` value, such as `john.doe1`.
  - Returns a dictionary with two keys:
    - `localpart`: A string, used to generate the Matrix ID. If this is
      `None`, the user is prompted to pick their own username. This is only used
      during a user's first login. Once a localpart has been associated with a
      remote user ID (see `get_remote_user_id`) it cannot be updated.
      `None`, the user is prompted to pick their own username.
    - `displayname`: An optional string, the display name for the user.
* `get_extra_attributes(self, userinfo, token)`
  - This method must be async.

@@ -155,8 +153,8 @@ A custom mapping provider must specify the following methods:
      information from.
    - `client_redirect_url` - A string, the URL that the client will be
      redirected to.
  - This method must return a string, which is the unique, immutable identifier
    for the user. Commonly the `uid` claim of the response.
  - This method must return a string, which is the unique identifier for the
    user. Commonly the ``uid`` claim of the response.
* `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)`
  - Arguments:
    - `saml_response` - A `saml2.response.AuthnResponse` object to extract user

@@ -174,10 +172,8 @@ A custom mapping provider must specify the following methods:
      redirected to.
  - This method must return a dictionary, which will then be used by Synapse
    to build a new user. The following keys are allowed:
    * `mxid_localpart` - A string, the mxid localpart of the new user. If this is
      `None`, the user is prompted to pick their own username. This is only used
      during a user's first login. Once a localpart has been associated with a
      remote user ID (see `get_remote_user_id`) it cannot be updated.
    * `mxid_localpart` - The mxid localpart of the new user. If this is
      `None`, the user is prompted to pick their own username.
    * `displayname` - The displayname of the new user. If not provided, will default to
      the value of `mxid_localpart`.
    * `emails` - A list of emails for the new user. If not provided, will
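To tie the OIDC methods above together, a minimal sketch of a mapping provider; the class name and the `preferred_username` claim are illustrative only, and the real interface also includes `parse_config` and `get_extra_attributes`:

```python
# Minimal sketch of an OIDC mapping provider covering the methods above.
# The class name and the claims used are illustrative assumptions.
class ExampleOidcMappingProvider:
    def __init__(self, parsed_config):
        self._config = parsed_config

    def get_remote_user_id(self, userinfo) -> str:
        # The unique identifier for the user: commonly the "sub" claim.
        return userinfo["sub"]

    async def map_user_attributes(self, userinfo, token, failures: int) -> dict:
        # Append the failure count so a retried localpart stays unique.
        localpart = userinfo["preferred_username"] + (str(failures) if failures else "")
        return {"localpart": localpart, "displayname": userinfo.get("name")}
```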
@@ -65,33 +65,3 @@ systemctl restart matrix-synapse-worker@federation_reader.service
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.target
```

## Hardening

**Optional:** If further hardening is desired, the file
`override-hardened.conf` may be copied from
`contrib/systemd/override-hardened.conf` in this repository to the location
`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
directory may have to be created). It enables certain sandboxing features in
systemd to further secure the synapse service. You may read the comments to
understand what the override file is doing. The same file will need to be copied
to
`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
(this directory may also have to be created) in order to apply the same
hardening options to any worker processes.

Once these files have been copied to their appropriate locations, simply reload
systemd's manager config files and restart all Synapse services to apply the hardening options. They will automatically
be applied at every restart as long as the override files are present at the
specified locations.

```sh
systemctl daemon-reload

# Restart services
systemctl restart matrix-synapse.target
```

In order to see their effect, you may run `systemd-analyze security
matrix-synapse.service` before and after applying the hardening options to see
the changes being applied at a glance.
@@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql)
solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.
mypy.ini
@@ -41,6 +41,7 @@ files =
    synapse/push,
    synapse/replication,
    synapse/rest,
    synapse/secrets.py,
    synapse/server.py,
    synapse/server_notices,
    synapse/spam_checker_api,

@@ -71,13 +72,8 @@ files =
    synapse/types.py,
    synapse/util/async_helpers.py,
    synapse/util/caches,
    synapse/util/daemonize.py,
    synapse/util/hash.py,
    synapse/util/iterutils.py,
    synapse/util/metrics.py,
    synapse/util/macaroons.py,
    synapse/util/module_loader.py,
    synapse/util/msisdn.py,
    synapse/util/stringutils.py,
    synapse/visibility.py,
    tests/replication,

@@ -85,7 +81,6 @@ files =
    tests/handlers/test_password_providers.py,
    tests/rest/client/v1/test_login.py,
    tests/rest/client/v2_alpha/test_auth.py,
    tests/util/test_itertools.py,
    tests/util/test_stream_change_cache.py

[mypy-pymacaroons.*]

@@ -177,12 +172,3 @@ ignore_missing_imports = True

[mypy-txacme.*]
ignore_missing_imports = True

[mypy-pympler.*]
ignore_missing_imports = True

[mypy-phonenumbers.*]
ignore_missing_imports = True

[mypy-ijson.*]
ignore_missing_imports = True
@@ -21,18 +21,17 @@ DISTS = (
    "debian:buster",
    "debian:bullseye",
    "debian:sid",
    "ubuntu:bionic",  # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:groovy",  # 20.10 (EOL 2021-07-07)
    "ubuntu:hirsute",  # 21.04 (EOL 2022-01-05)
    "ubuntu:bionic",
    "ubuntu:focal",
    "ubuntu:groovy",
)

DESC = """\
DESC = '''\
Builds .debs for synapse, using a Docker image for the build environment.

By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
"""
'''


class Builder(object):

@@ -46,7 +45,7 @@ class Builder(object):
        """Build deb for a single distribution"""

        if self._failed:
            print("not building %s due to earlier failure" % (dist,))
            print("not building %s due to earlier failure" % (dist, ))
            raise Exception("failed")

        try:
@@ -68,65 +67,48 @@ class Builder(object):
        # we tend to get source packages which are full of debs. (We could hack
        # around that with more magic in the build_debian.sh script, but that
        # doesn't solve the problem for natively-run dpkg-buildpackage).
        debsdir = os.path.join(projdir, "../debs")
        debsdir = os.path.join(projdir, '../debs')
        os.makedirs(debsdir, exist_ok=True)

        if self.redirect_stdout:
            logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
            logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
            print("building %s: directing output to %s" % (dist, logfile))
            stdout = open(logfile, "w")
        else:
            stdout = None

        # first build a docker image for the build environment
        subprocess.check_call(
            [
                "docker",
                "build",
                "--tag",
                "dh-venv-builder:" + tag,
                "--build-arg",
                "distro=" + dist,
                "-f",
                "docker/Dockerfile-dhvirtualenv",
                "docker",
            ],
            stdout=stdout,
            stderr=subprocess.STDOUT,
        )
        subprocess.check_call([
            "docker", "build",
            "--tag", "dh-venv-builder:" + tag,
            "--build-arg", "distro=" + dist,
            "-f", "docker/Dockerfile-dhvirtualenv",
            "docker",
        ], stdout=stdout, stderr=subprocess.STDOUT)

        container_name = "synapse_build_" + tag
        with self._lock:
            self.active_containers.add(container_name)

        # then run the build itself
        subprocess.check_call(
            [
                "docker",
                "run",
                "--rm",
                "--name",
                container_name,
                "--volume=" + projdir + ":/synapse/source:ro",
                "--volume=" + debsdir + ":/debs",
                "-e",
                "TARGET_USERID=%i" % (os.getuid(),),
                "-e",
                "TARGET_GROUPID=%i" % (os.getgid(),),
                "-e",
                "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
                "dh-venv-builder:" + tag,
            ],
            stdout=stdout,
            stderr=subprocess.STDOUT,
        )
        subprocess.check_call([
            "docker", "run",
            "--rm",
            "--name", container_name,
            "--volume=" + projdir + ":/synapse/source:ro",
            "--volume=" + debsdir + ":/debs",
            "-e", "TARGET_USERID=%i" % (os.getuid(), ),
            "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
            "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
            "dh-venv-builder:" + tag,
        ], stdout=stdout, stderr=subprocess.STDOUT)

        with self._lock:
            self.active_containers.remove(container_name)

        if stdout is not None:
            stdout.close()
        print("Completed build of %s" % (dist,))
        print("Completed build of %s" % (dist, ))

    def kill_containers(self):
        with self._lock:

@@ -134,14 +116,9 @@ class Builder(object):

        for c in active:
            print("killing container %s" % (c,))
            subprocess.run(
                [
                    "docker",
                    "kill",
                    c,
                ],
                stdout=subprocess.DEVNULL,
            )
            subprocess.run([
                "docker", "kill", c,
            ], stdout=subprocess.DEVNULL)
            with self._lock:
                self.active_containers.remove(c)
@@ -152,38 +129,31 @@ def run_builds(dists, jobs=1, skip_tests=False):
    def sig(signum, _frame):
        print("Caught SIGINT")
        builder.kill_containers()

    signal.signal(signal.SIGINT, sig)

    with ThreadPoolExecutor(max_workers=jobs) as e:
        res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)

        # make sure we consume the iterable so that exceptions are raised.
        for _ in res:
        for r in res:
            pass


if __name__ == "__main__":
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=DESC,
    )
    parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=1,
        help="specify the number of builds to run in parallel",
        '-j', '--jobs', type=int, default=1,
        help='specify the number of builds to run in parallel',
    )
    parser.add_argument(
        "--no-check",
        action="store_true",
        help="skip running tests after building",
        '--no-check', action='store_true',
        help='skip running tests after building',
    )
    parser.add_argument(
        "dist",
        nargs="*",
        default=DISTS,
        help="a list of distributions to build for. Default: %(default)s",
        'dist', nargs='*', default=DISTS,
        help='a list of distributions to build for. Default: %(default)s',
    )
    args = parser.parse_args()
    run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
@@ -10,9 +10,6 @@
# checkout by setting the COMPLEMENT_DIR environment variable to the
# filepath of a local Complement checkout.
#
# By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable.
#
# A regular expression of test method names can be supplied as the first
# argument to the script. Complement will then only run those tests. If
# no regex is supplied, all tests are run. For example;

@@ -35,26 +32,10 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
  echo "Checkout available at 'complement-master'"
fi

# If we're using workers, modify the docker files slightly.
if [[ -n "$WORKERS" ]]; then
  BASE_IMAGE=matrixdotorg/synapse-workers
  BASE_DOCKERFILE=docker/Dockerfile-workers
  export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
  COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
  # And provide some more configuration to complement.
  export COMPLEMENT_CA=true
  export COMPLEMENT_VERSION_CHECK_ITERATIONS=500
else
  BASE_IMAGE=matrixdotorg/synapse
  BASE_DOCKERFILE=docker/Dockerfile
  export COMPLEMENT_BASE_IMAGE=complement-synapse
  COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
fi

# Build the base Synapse image from the local checkout
docker build -t $BASE_IMAGE -f "$BASE_DOCKERFILE" .
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
# Build the Synapse monolith image from Complement, based on the above image we just built
docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"
docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"

cd "$COMPLEMENT_DIR"

@@ -65,4 +46,4 @@ if [[ -n "$1" ]]; then
fi

# Run the tests!
go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
@@ -1,3 +1,4 @@
import hashlib
import json
import sys
import time

@@ -53,9 +54,15 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate):
        "server_name": server_name,
        "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
        "valid_until_ts": valid_until,
        "tls_fingerprints": [fingerprint(certificate)],
    }


def fingerprint(certificate):
    finger = hashlib.sha256(certificate)
    return {"sha256": encode_base64(finger.digest())}


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)

@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python2

import sys
@@ -80,22 +80,8 @@ else
  # then lint everything!
  if [[ -z ${files+x} ]]; then
    # Lint all source code files and directories
    # Note: this list aims to mirror the one in tox.ini
    files=(
      "synapse" "docker" "tests"
      # annoyingly, black doesn't find these so we have to list them
      "scripts/export_signing_key"
      "scripts/generate_config"
      "scripts/generate_log_config"
      "scripts/hash_password"
      "scripts/register_new_matrix_user"
      "scripts/synapse_port_db"
      "scripts-dev"
      "scripts-dev/build_debian_packages"
      "scripts-dev/sign_json"
      "scripts-dev/update_database"
      "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite"
    )
    # Note: this list aims the mirror the one in tox.ini
    files=("synapse" "docker" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite")
  fi
fi
@@ -6,7 +6,7 @@
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.

export PGHOST="localhost"
POSTGRES_HOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"

SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"

@@ -32,7 +32,7 @@ usage() {
while getopts "p:co:h" opt; do
  case $opt in
    p)
      export PGUSER=$OPTARG
      POSTGRES_USERNAME=$OPTARG
      ;;
    c)
      # Print all commands that are being executed

@@ -69,7 +69,7 @@ if [ ${#unsatisfied_requirements} -ne 0 ]; then
  exit 1
fi

if [ -z "$PGUSER" ]; then
if [ -z "$POSTGRES_USERNAME" ]; then
  echo "No postgres username supplied"
  usage
  exit 1

@@ -84,9 +84,8 @@ fi
# Create the output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"

read -rsp "Postgres password for '$PGUSER': " PGPASSWORD
read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD
echo ""
export PGPASSWORD

# Exit immediately if a command fails
set -e

@@ -132,9 +131,9 @@ report_stats: false
database:
  name: "psycopg2"
  args:
    user: "$PGUSER"
    host: "$PGHOST"
    password: "$PGPASSWORD"
    user: "$POSTGRES_USERNAME"
    host: "$POSTGRES_HOST"
    password: "$POSTGRES_PASSWORD"
    database: "$POSTGRES_DB_NAME"

# Suppress the key server warning.

@@ -151,7 +150,7 @@ scripts-dev/update_database --database-config "$SQLITE_CONFIG"

# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
createdb $POSTGRES_DB_NAME

echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
if [ -z "$COVERAGE" ]; then

@@ -182,7 +181,7 @@ DROP TABLE user_directory_search_docsize;
DROP TABLE user_directory_search_stat;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"

echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"
@@ -30,11 +30,7 @@ def exit(status: int = 0, message: Optional[str] = None):
def format_plain(public_key: nacl.signing.VerifyKey):
    print(
        "%s:%s %s"
        % (
            public_key.alg,
            public_key.version,
            encode_verify_key_base64(public_key),
        )
        % (public_key.alg, public_key.version, encode_verify_key_base64(public_key),)
    )


@@ -54,10 +50,7 @@ if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "key_file",
        nargs="+",
        type=argparse.FileType("r"),
        help="The key file to read",
        "key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read",
    )

    parser.add_argument(

@@ -70,7 +63,7 @@ if __name__ == "__main__":
    parser.add_argument(
        "--expiry-ts",
        type=int,
        default=int(time.time() * 1000) + 6 * 3600000,
        default=int(time.time() * 1000) + 6*3600000,
        help=(
            "The expiry time to use for -x, in milliseconds since 1970. The default "
            "is (now+6h)."
@@ -11,22 +11,23 @@ if __name__ == "__main__":
    parser.add_argument(
        "--config-dir",
        default="CONFDIR",
        help="The path where the config files are kept. Used to create filenames for "
        "things like the log config and the signing key. Default: %(default)s",
    )

    parser.add_argument(
        "--data-dir",
        default="DATADIR",
        help="The path where the data files are kept. Used to create filenames for "
        "things like the database and media store. Default: %(default)s",
    )

    parser.add_argument(
        "--server-name",
        default="SERVERNAME",
        help="The server name. Used to initialise the server_name config param, but also "
        "used in the names of some of the config files. Default: %(default)s",
    )

    parser.add_argument(

@@ -40,22 +41,21 @@ if __name__ == "__main__":
        "--generate-secrets",
        action="store_true",
        help="Enable generation of new secrets for things like the macaroon_secret_key."
        "By default, these parameters will be left unset.",
        "By default, these parameters will be left unset."
    )

    parser.add_argument(
        "-o",
        "--output-file",
        type=argparse.FileType("w"),
        "-o", "--output-file",
        type=argparse.FileType('w'),
        default=sys.stdout,
        help="File to write the configuration to. Default: stdout",
    )

    parser.add_argument(
        "--header-file",
        type=argparse.FileType("r"),
        type=argparse.FileType('r'),
        help="File from which to read a header, which will be printed before the "
        "generated config.",
    )

    args = parser.parse_args()
@@ -41,7 +41,7 @@ if __name__ == "__main__":
    parser.add_argument(
        "-c",
        "--config",
        type=argparse.FileType("r"),
        type=argparse.FileType('r'),
        help=(
            "Path to server config file. "
            "Used to read in bcrypt_rounds and password_pepper."

@@ -72,8 +72,8 @@ if __name__ == "__main__":
    pw = unicodedata.normalize("NFKC", password)

    hashed = bcrypt.hashpw(
        pw.encode("utf8") + password_pepper.encode("utf8"),
        pw.encode('utf8') + password_pepper.encode("utf8"),
        bcrypt.gensalt(bcrypt_rounds),
    ).decode("ascii")
    ).decode('ascii')

    print(hashed)
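For reference, a hash produced this way can be checked with `bcrypt.checkpw`; a minimal sketch (the pepper and stored hash are placeholders):

```python
# Minimal sketch: verify a password against a hash printed by hash_password.
# The pepper and stored hash are placeholders.
import unicodedata

import bcrypt

password_pepper = ""
stored_hash = "$2b$12$..."  # placeholder: output of hash_password

pw = unicodedata.normalize("NFKC", "my-password")
matches = bcrypt.checkpw(
    pw.encode("utf8") + password_pepper.encode("utf8"),
    stored_hash.encode("ascii"),
)
print(matches)
```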
@@ -294,7 +294,8 @@ class Porter(object):
        return table, already_ported, total_to_port, forward_chunk, backward_chunk

    async def get_table_constraints(self) -> Dict[str, Set[str]]:
        """Returns a map of tables that have foreign key constraints to tables they depend on."""
        """Returns a map of tables that have foreign key constraints to tables they depend on.
        """

        def _get_constraints(txn):
            # We can pull the information about foreign key constraints out from

@@ -503,9 +504,7 @@ class Porter(object):
        return

    def build_db_store(
        self,
        db_config: DatabaseConnectionConfig,
        allow_outdated_version: bool = False,
        self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False,
    ):
        """Builds and returns a database store using the provided configuration.

@@ -741,7 +740,7 @@ class Porter(object):
            return col

        outrows = []
        for row in rows:
        for i, row in enumerate(rows):
            try:
                outrows.append(
                    tuple(conv(j, col) for j, col in enumerate(row) if j > 0)

@@ -891,7 +890,8 @@ class Porter(object):
        await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)

    async def _setup_events_stream_seqs(self) -> None:
        """Set the event stream sequences to the correct values."""
        """Set the event stream sequences to the correct values.
        """

        # We get called before we've ported the events table, so we need to
        # fetch the current positions from the SQLite store.

@@ -913,21 +913,18 @@ class Porter(object):
                (curr_forward_id + 1,),
            )

            if curr_backward_id:
                txn.execute(
                    "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s",
                    (curr_backward_id + 1,),
                )

        await self.postgres_store.db_pool.runInteraction(
            "_setup_events_stream_seqs",
            _setup_events_stream_seqs_set_pos,
            "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
        )

    async def _setup_sequence(
        self, sequence_name: str, stream_id_tables: Iterable[str]
    ) -> None:
        """Set a sequence to the correct value."""
    async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None:
        """Set a sequence to the correct value.
        """
        current_stream_ids = []
        for stream_id_table in stream_id_tables:
            max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(

@@ -941,32 +938,26 @@ class Porter(object):
        next_id = max(current_stream_ids) + 1

        def r(txn):
            sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,)
            txn.execute(sql + " %s", (next_id,))
            sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, )
            txn.execute(sql + " %s", (next_id, ))

        await self.postgres_store.db_pool.runInteraction(
            "_setup_%s" % (sequence_name,), r
        )
        await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r)

    async def _setup_auth_chain_sequence(self) -> None:
        curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
            table="event_auth_chains",
            keyvalues={},
            retcol="MAX(chain_id)",
            allow_none=True,
            table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True
        )

        def r(txn):
            txn.execute(
                "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
                (curr_chain_id + 1,),
                (curr_chain_id,),
            )

        if curr_chain_id is not None:
            await self.postgres_store.db_pool.runInteraction(
                "_setup_event_auth_chain_id",
                r,
            )
            await self.postgres_store.db_pool.runInteraction(
                "_setup_event_auth_chain_id", r,
            )


##############################################

@@ -975,7 +966,8 @@ class Porter(object):


class Progress(object):
    """Used to report progress of the port"""
    """Used to report progress of the port
    """

    def __init__(self):
        self.tables = {}

@@ -1000,7 +992,8 @@ class Progress(object):


class CursesProgress(Progress):
    """Reports progress to a curses window"""
    """Reports progress to a curses window
    """

    def __init__(self, stdscr):
        self.stdscr = stdscr

@@ -1025,7 +1018,7 @@ class CursesProgress(Progress):

        self.total_processed = 0
        self.total_remaining = 0
        for data in self.tables.values():
        for table, data in self.tables.items():
            self.total_processed += data["num_done"] - data["start"]
            self.total_remaining += data["total"] - data["num_done"]

@@ -1116,7 +1109,8 @@ class CursesProgress(Progress):


class TerminalProgress(Progress):
    """Just prints progress to the terminal"""
    """Just prints progress to the terminal
    """

    def update(self, table, num_done):
        super(TerminalProgress, self).update(table, num_done)
@@ -21,8 +21,8 @@ import os
import sys

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 6):
    print("Synapse requires Python 3.6 or above.")
if sys.version_info < (3, 5):
    print("Synapse requires Python 3.5 or above.")
    sys.exit(1)

# Twisted and canonicaljson will fail to import when this file is executed to

@@ -47,7 +47,7 @@ try:
except ImportError:
    pass

__version__ = "1.35.0"
__version__ = "1.32.2"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
@@ -87,7 +87,6 @@ class Auth:
        )
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

    async def check_from_context(
        self, room_version: str, event, context, do_sig_check=True

@@ -209,8 +208,6 @@ class Auth:
            opentracing.set_tag("authenticated_entity", user_id)
            opentracing.set_tag("user_id", user_id)
            opentracing.set_tag("appservice_id", app_service.id)
            if user_id in self._force_tracing_for_users:
                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)

            return requester

@@ -263,8 +260,6 @@ class Auth:
            opentracing.set_tag("user_id", user_info.user_id)
            if device_id:
                opentracing.set_tag("device_id", device_id)
            if user_info.token_owner in self._force_tracing_for_users:
                opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)

            return requester
        except KeyError:

@@ -110,18 +110,13 @@ class EventTypes:

    Dummy = "org.matrix.dummy_event"

    SpaceChild = "m.space.child"
    SpaceParent = "m.space.parent"
    MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child"
    MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent"


class ToDeviceEventTypes:
    RoomKeyRequest = "m.room_key_request"


class EduTypes:
    Presence = "m.presence"
    RoomKeyRequest = "m.room_key_request"


class RejectedReason:

@@ -179,7 +174,6 @@ class EventContentFields:
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"

    # cf https://github.com/matrix-org/matrix-doc/pull/1772
    ROOM_TYPE = "type"
    MSC1772_ROOM_TYPE = "org.matrix.msc1772.type"
@@ -57,7 +57,6 @@ class Ratelimiter:
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
        n_actions: int = 1,
        _time_now_s: Optional[int] = None,
    ) -> Tuple[bool, float]:
        """Can the entity (e.g. user or IP address) perform the action?

@@ -77,9 +76,6 @@ class Ratelimiter:
            burst_count: How many actions that can be performed before being limited.
                Overrides the value set during instantiation if set.
            update: Whether to count this check as performing the action
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.
            _time_now_s: The current time. Optional, defaults to the current time according
                to self.clock. Only used by tests.

@@ -128,20 +124,17 @@ class Ratelimiter:
        time_delta = time_now_s - time_start
        performed_count = action_count - time_delta * rate_hz
        if performed_count < 0:
            performed_count = 0
            # Allow, reset back to count 1
            allowed = True
            time_start = time_now_s

        # This check would be easier read as performed_count + n_actions > burst_count,
        # but performed_count might be a very precise float (with lots of numbers
        # following the point) in which case Python might round it up when adding it to
        # n_actions. Writing it this way ensures it doesn't happen.
        if performed_count > burst_count - n_actions:
            action_count = 1.0
        elif performed_count > burst_count - 1.0:
            # Deny, we have exceeded our burst count
            allowed = False
        else:
            # We haven't reached our limit yet
            allowed = True
            action_count = performed_count + n_actions
            action_count += 1.0

        if update:
            self.actions[key] = (action_count, time_start, rate_hz)

@@ -189,7 +182,6 @@ class Ratelimiter:
        rate_hz: Optional[float] = None,
        burst_count: Optional[int] = None,
        update: bool = True,
        n_actions: int = 1,
        _time_now_s: Optional[int] = None,
    ):
        """Checks if an action can be performed. If not, raises a LimitExceededError

@@ -209,9 +201,6 @@ class Ratelimiter:
            burst_count: How many actions that can be performed before being limited.
                Overrides the value set during instantiation if set.
            update: Whether to count this check as performing the action
            n_actions: The number of times the user wants to do this action. If the user
                cannot do all of the actions, the user's action count is not incremented
                at all.
            _time_now_s: The current time. Optional, defaults to the current time according
                to self.clock. Only used by tests.

@@ -227,7 +216,6 @@ class Ratelimiter:
            rate_hz=rate_hz,
            burst_count=burst_count,
            update=update,
            n_actions=n_actions,
            _time_now_s=time_now_s,
        )
@@ -37,7 +37,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.async_helpers import Linearizer
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit

@@ -116,7 +115,6 @@ def start_reactor(

    def run():
        logger.info("Running")
        setup_jemalloc_stats()
        change_resource_limit(soft_file_limit)
        if gc_thresholds:
            gc.set_threshold(*gc_thresholds)

@@ -61,6 +61,7 @@ from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet

@@ -236,6 +237,7 @@ class GenericWorkerSlavedStore(
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    SlavedFilteringStore,

@@ -452,10 +454,6 @@ def start(config_options):
        config.server.update_user_directory = False

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = GenericWorkerServer(
        config.server_name,

@@ -341,10 +341,6 @@ def setup(config_options):
        sys.exit(0)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = SynapseHomeServer(
        config.server_name,

@@ -88,6 +88,10 @@ class ApiConfig(Config):
        if not room_prejoin_state_config.get("disable_default_event_types"):
            yield from _DEFAULT_PREJOIN_STATE_TYPES

        if self.spaces_enabled:
            # MSC1772 suggests adding m.room.create to the prejoin state
            yield EventTypes.Create

        yield from room_prejoin_state_config.get("additional_event_types", [])


@@ -105,8 +109,6 @@ _DEFAULT_PREJOIN_STATE_TYPES = [
    EventTypes.RoomAvatar,
    EventTypes.RoomEncryption,
    EventTypes.Name,
    # Per MSC1772.
    EventTypes.Create,
]
@@ -17,8 +17,6 @@ import re
import threading
from typing import Callable, Dict

from synapse.python_dependencies import DependencyException, check_requirements

from ._base import Config, ConfigError

# The prefix for all cache factor-related environment variables

@@ -191,15 +189,6 @@ class CacheConfig(Config):
            )
            self.cache_factors[cache] = factor

        self.track_memory_usage = cache_config.get("track_memory_usage", False)
        if self.track_memory_usage:
            try:
                check_requirements("cache_memory")
            except DependencyException as e:
                raise ConfigError(
                    e.message  # noqa: B306, DependencyException.message is a property
                )

        # Resize all caches (if necessary) with the new factors we've loaded
        self.resize_all_caches()


@@ -58,7 +58,6 @@ DEFAULT_CONFIG = """\
# password: secretpassword
# database: synapse
# host: localhost
# port: 5432
# cp_min: 5
# cp_max: 10
#

@@ -29,26 +29,9 @@ class ExperimentalConfig(Config):
        self.msc2858_enabled = experimental.get("msc2858_enabled", False)  # type: bool

        # Spaces (MSC1772, MSC2946, MSC3083, etc)
        self.spaces_enabled = experimental.get("spaces_enabled", True)  # type: bool
        self.spaces_enabled = experimental.get("spaces_enabled", False)  # type: bool
        if self.spaces_enabled:
            KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083

        # MSC3026 (busy presence state)
        self.msc3026_enabled = experimental.get("msc3026_enabled", False)  # type: bool

    def generate_config_section(self, **kwargs):
        return """\
        # Enable experimental features in Synapse.
        #
        # Experimental features might break or be removed without a deprecation
        # period.
        #
        experimental_features:
          # Support for Spaces (MSC1772), it enables the following:
          #
          # * The Spaces Summary API (MSC2946).
          # * Restricting room membership based on space membership (MSC3083).
          #
          # Uncomment to disable support for Spaces.
          #spaces_enabled: false
        """

@@ -44,10 +44,6 @@ class FederationConfig(Config):
            "allow_profile_lookup_over_federation", True
        )

        self.allow_device_name_lookup_over_federation = config.get(
            "allow_device_name_lookup_over_federation", True
        )

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
        ## Federation ##

@@ -79,12 +75,6 @@ class FederationConfig(Config):
        # on this homeserver. Defaults to 'true'.
        #
        #allow_profile_lookup_over_federation: false

        # Uncomment to disable device display name lookup over federation. By default, the
        # Federation API allows other homeservers to obtain device display names of any user
        # on this homeserver. Defaults to 'true'.
        #
        #allow_device_name_lookup_over_federation: false
        """
@@ -57,6 +57,7 @@ class HomeServerConfig(RootConfig):

    config_classes = [
        ServerConfig,
        ExperimentalConfig,
        TlsConfig,
        FederationConfig,
        CacheConfig,

@@ -93,5 +94,4 @@ class HomeServerConfig(RootConfig):
        TracerConfig,
        WorkerConfig,
        RedisConfig,
        ExperimentalConfig,
    ]

@@ -349,4 +349,4 @@ class RegistrationConfig(Config):

    def read_arguments(self, args):
        if args.enable_registration is not None:
            self.enable_registration = strtobool(str(args.enable_registration))
            self.enable_registration = bool(strtobool(str(args.enable_registration)))

@@ -164,13 +164,7 @@ class SAML2Config(Config):
        config_path = saml2_config.get("config_path", None)
        if config_path is not None:
            mod = load_python_module(config_path)
            config = getattr(mod, "CONFIG", None)
            if config is None:
                raise ConfigError(
                    "Config path specified by saml2_config.config_path does not "
                    "have a CONFIG property."
                )
            _dict_merge(merge_dict=config, into_dict=saml2_config_dict)
            _dict_merge(merge_dict=mod.CONFIG, into_dict=saml2_config_dict)

        import saml2.config
@@ -19,7 +19,7 @@ import logging
import os.path
import re
from textwrap import indent
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from typing import Any, Dict, Iterable, List, Optional, Set

import attr
import yaml

@@ -572,7 +572,6 @@ class ServerConfig(Config):
        _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
        self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))

@attr.s
class LimitRemoteRoomsConfig:

@@ -918,16 +917,6 @@ class ServerConfig(Config):
        #
        #gc_thresholds: [700, 10, 10]

        # The minimum time in seconds between each GC for a generation, regardless of
        # the GC thresholds. This ensures that we don't do GC too frequently.
        #
        # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
        # generation 0 GCs, etc.
        #
        # Defaults to `[1s, 10s, 30s]`.
        #
        #gc_min_interval: [0.5s, 30s, 1m]

        # Set the limit on the returned events in the timeline in the get
        # and sync operations. The default value is 100. -1 means no upper limit.
        #

@@ -1316,24 +1305,6 @@ class ServerConfig(Config):
            help="Turn on the twisted telnet manhole service on the given port.",
        )

    def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]:
        """Reads the three durations for the GC min interval option, returning seconds."""
        if durations is None:
            return None

        try:
            if len(durations) != 3:
                raise ValueError()
            return (
                self.parse_duration(durations[0]) / 1000,
                self.parse_duration(durations[1]) / 1000,
                self.parse_duration(durations[2]) / 1000,
            )
        except Exception:
            raise ConfigError(
                "Value of `gc_min_interval` must be a list of three durations if set"
            )


def is_threepid_reserved(reserved_threepids, threepid):
    """Check the threepid against the reserved threepid config
@@ -16,7 +16,10 @@ import logging
import os
import warnings
from datetime import datetime
from typing import List, Optional, Pattern
from hashlib import sha256
from typing import List, Optional

from unpaddedbase64 import encode_base64

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import Certificate, trustRootFromCertificates

@@ -80,6 +83,13 @@ class TlsConfig(Config):
                "configured."
            )

        self._original_tls_fingerprints = config.get("tls_fingerprints", [])

        if self._original_tls_fingerprints is None:
            self._original_tls_fingerprints = []

        self.tls_fingerprints = list(self._original_tls_fingerprints)

        # Whether to verify certificates on outbound federation traffic
        self.federation_verify_certificates = config.get(
            "federation_verify_certificates", True

@@ -114,7 +124,7 @@ class TlsConfig(Config):
        fed_whitelist_entries = []

        # Support globs (*) in whitelist values
        self.federation_certificate_verification_whitelist = []  # type: List[Pattern]
        self.federation_certificate_verification_whitelist = []  # type: List[str]
        for entry in fed_whitelist_entries:
            try:
                entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))

@@ -238,6 +248,19 @@ class TlsConfig(Config):
                e,
            )

        self.tls_fingerprints = list(self._original_tls_fingerprints)

        if self.tls_certificate:
            # Check that our own certificate is included in the list of fingerprints
            # and include it if it is not.
            x509_certificate_bytes = crypto.dump_certificate(
                crypto.FILETYPE_ASN1, self.tls_certificate
            )
            sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
            sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
            if sha256_fingerprint not in sha256_fingerprints:
                self.tls_fingerprints.append({"sha256": sha256_fingerprint})

    def generate_config_section(
        self,
        config_dir_path,

@@ -420,6 +443,33 @@ class TlsConfig(Config):
        # If unspecified, we will use CONFDIR/client.key.
        #
        account_key_file: %(default_acme_account_file)s

        # List of allowed TLS fingerprints for this server to publish along
        # with the signing keys for this server. Other matrix servers that
        # make HTTPS requests to this server will check that the TLS
        # certificates returned by this server match one of the fingerprints.
        #
        # Synapse automatically adds the fingerprint of its own certificate
        # to the list. So if federation traffic is handled directly by synapse
        # then no modification to the list is required.
        #
        # If synapse is run behind a load balancer that handles the TLS then it
        # will be necessary to add the fingerprints of the certificates used by
        # the loadbalancers to this list if they are different to the one
        # synapse is using.
        #
        # Homeservers are permitted to cache the list of TLS fingerprints
        # returned in the key responses up to the "valid_until_ts" returned in
        # key. It may be necessary to publish the fingerprints of a new
        # certificate and wait until the "valid_until_ts" of the previous key
        # responses have passed before deploying it.
        #
        # You can calculate a fingerprint from a given TLS listener via:
        # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
        # openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
        # or by checking matrix.org/federationtester/api/report?server_name=$host
        #
        #tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """
        # Lowercase the string representation of boolean values
        % {
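The same fingerprint that this code publishes can be reproduced outside Synapse; a minimal sketch (the certificate path is an example):

```python
# Minimal sketch: compute the unpadded-base64 SHA-256 fingerprint of a
# certificate, matching what the code above adds to tls_fingerprints.
# The path is an example.
from hashlib import sha256

from OpenSSL import crypto
from unpaddedbase64 import encode_base64

with open("/etc/matrix-synapse/fullchain.pem", "rb") as f:
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
print({"sha256": encode_base64(sha256(der_bytes).digest())})
```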
||||
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Set

from synapse.python_dependencies import DependencyException, check_requirements

from ._base import Config, ConfigError

@@ -34,8 +32,6 @@ class TracerConfig(Config):
            {"sampler": {"type": "const", "param": 1}, "logging": False},
        )

        self.force_tracing_for_users: Set[str] = set()

        if not self.opentracer_enabled:
            return

@@ -52,19 +48,6 @@ class TracerConfig(Config):
        if not isinstance(self.opentracer_whitelist, list):
            raise ConfigError("Tracer homeserver_whitelist config is malformed")

        force_tracing_for_users = opentracing_config.get("force_tracing_for_users", [])
        if not isinstance(force_tracing_for_users, list):
            raise ConfigError(
                "Expected a list", ("opentracing", "force_tracing_for_users")
            )
        for i, u in enumerate(force_tracing_for_users):
            if not isinstance(u, str):
                raise ConfigError(
                    "Expected a string",
                    ("opentracing", "force_tracing_for_users", f"index {i}"),
                )
            self.force_tracing_for_users.add(u)

    def generate_config_section(cls, **kwargs):
        return """\
        ## Opentracing ##

@@ -81,8 +64,7 @@ class TracerConfig(Config):
        #enabled: true

        # The list of homeservers to which we wish to send and receive span contexts and span baggage.
        # See docs/opentracing.rst.
        #
        # See docs/opentracing.rst
        # This is a list of regexes which are matched against the server_name of the
        # homeserver.
        #

@@ -91,26 +73,19 @@ class TracerConfig(Config):
        #homeserver_whitelist:
        #  - ".*"

        # A list of the matrix IDs of users whose requests will always be traced,
        # even if the tracing system would otherwise drop the traces due to
        # probabilistic sampling.
        #
        # By default, the list is empty.
        #
        #force_tracing_for_users:
        #  - "@user1:server_name"
        #  - "@user2:server_name"

        # Jaeger can be configured to sample traces at different rates.
        # All configuration options provided by Jaeger can be set here.
        # Jaeger's configuration is mostly related to trace sampling which
        # Jaeger's configuration mostly related to trace sampling which
        # is documented here:
        # https://www.jaegertracing.io/docs/latest/sampling/.
        # https://www.jaegertracing.io/docs/1.13/sampling/.
        #
        #jaeger_config:
        #  sampler:
        #    type: const
        #    param: 1

        # Logging whether spans were started and reported
        #
        # logging:
        #   false
        """
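The `force_tracing_for_users` hunk above follows a reusable validation pattern: type-check the container, then each element, reporting an indexed path on failure. A standalone sketch, under the assumption that `ConfigError` carries just a message and an error path:

from typing import Any, Dict, Set


class ConfigError(Exception):
    """Stand-in for Synapse's ConfigError: a message plus an error path."""

    def __init__(self, msg: str, path: Any = None):
        super().__init__(msg)
        self.path = path


def parse_user_list(config: Dict[str, Any], key: str) -> Set[str]:
    values = config.get(key, [])
    if not isinstance(values, list):
        raise ConfigError("Expected a list", ("opentracing", key))
    users: Set[str] = set()
    for i, u in enumerate(values):
        if not isinstance(u, str):
            raise ConfigError("Expected a string", ("opentracing", key, f"index {i}"))
        users.add(u)
    return users


print(parse_user_list({"force_tracing_for_users": ["@user1:server_name"]}, "force_tracing_for_users"))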
@@ -17,7 +17,7 @@ import abc
import logging
import urllib
from collections import defaultdict
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Tuple
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple

import attr
from signedjson.key import (

@@ -42,8 +42,6 @@ from synapse.api.errors import (
    SynapseError,
)
from synapse.config.key import TrustedKeyServer
from synapse.events import EventBase
from synapse.events.utils import prune_event_dict
from synapse.logging.context import (
    PreserveLoggingContext,
    make_deferred_yieldable,

@@ -71,11 +69,7 @@ class VerifyJsonRequest:
    Attributes:
        server_name: The name of the server to verify against.

        get_json_object: A callback to fetch the JSON object to verify.
            A callback is used to allow deferring the creation of the JSON
            object to verify until needed, e.g. for events we can defer
            creating the redacted copy. This reduces the memory usage when
            there are large numbers of in-flight requests.
        json_object: The JSON object to verify.

        minimum_valid_until_ts: time at which we require the signing key to
            be valid. (0 implies we don't care)

@@ -94,50 +88,14 @@ class VerifyJsonRequest:
    """

    server_name = attr.ib(type=str)
    get_json_object = attr.ib(type=Callable[[], JsonDict])
    json_object = attr.ib(type=JsonDict)
    minimum_valid_until_ts = attr.ib(type=int)
    request_name = attr.ib(type=str)
    key_ids = attr.ib(type=List[str])
    key_ids = attr.ib(init=False, type=List[str])
    key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred)

    @staticmethod
    def from_json_object(
        server_name: str,
        json_object: JsonDict,
        minimum_valid_until_ms: int,
        request_name: str,
    ):
        """Create a VerifyJsonRequest to verify all signatures on a signed JSON
        object for the given server.
        """
        key_ids = signature_ids(json_object, server_name)
        return VerifyJsonRequest(
            server_name,
            lambda: json_object,
            minimum_valid_until_ms,
            request_name=request_name,
            key_ids=key_ids,
        )

    @staticmethod
    def from_event(
        server_name: str,
        event: EventBase,
        minimum_valid_until_ms: int,
    ):
        """Create a VerifyJsonRequest to verify all signatures on an event
        object for the given server.
        """
        key_ids = list(event.signatures.get(server_name, []))
        return VerifyJsonRequest(
            server_name,
            # We defer creating the redacted json object, as it uses a lot more
            # memory than the Event object itself.
            lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
            minimum_valid_until_ms,
            request_name=event.event_id,
            key_ids=key_ids,
        )

    def __attrs_post_init__(self):
        self.key_ids = signature_ids(self.json_object, self.server_name)
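The `get_json_object` callback above is plain lazy construction: hold a thunk and build the expensive value only when verification actually runs. A minimal sketch of the idea, independent of Synapse's event types:

from typing import Any, Callable, Dict

JsonDict = Dict[str, Any]


class LazyVerifyRequest:
    """Stores a thunk rather than the JSON itself, so expensive copies
    (e.g. redacted event dicts) are built only when needed."""

    def __init__(self, get_json_object: Callable[[], JsonDict]):
        self._get_json_object = get_json_object

    def materialize(self) -> JsonDict:
        return self._get_json_object()


# The large dict below is not built until materialize() is called:
req = LazyVerifyRequest(lambda: {"content": "x" * 10_000})
payload = req.materialize()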
class KeyLookupError(ValueError):

@@ -189,13 +147,8 @@ class Keyring:
            Deferred[None]: completes if the object was correctly signed, otherwise
            errbacks with an error
        """
        request = VerifyJsonRequest.from_json_object(
            server_name,
            json_object,
            validity_time,
            request_name,
        )
        requests = (request,)
        req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
        requests = (req,)
        return make_deferred_yieldable(self._verify_objects(requests)[0])

    def verify_json_objects_for_server(

@@ -222,41 +175,10 @@ class Keyring:
            logcontext.
        """
        return self._verify_objects(
            VerifyJsonRequest.from_json_object(
                server_name, json_object, validity_time, request_name
            )
            VerifyJsonRequest(server_name, json_object, validity_time, request_name)
            for server_name, json_object, validity_time, request_name in server_and_json
        )

    def verify_events_for_server(
        self, server_and_events: Iterable[Tuple[str, EventBase, int]]
    ) -> List[defer.Deferred]:
        """Bulk verification of signatures on events.

        Args:
            server_and_events:
                Iterable of `(server_name, event, validity_time)` tuples.

                `server_name` is the server whose signature we are verifying
                on the event.

                `event` is the event whose signatures we'll verify for
                the given `server_name`.

                `validity_time` is a timestamp at which the signing key must be
                valid.

        Returns:
            List<Deferred[None]>: for each input triplet, a deferred indicating success
                or failure to verify each event's signature for the given
                server_name. The deferreds run their callbacks in the sentinel
                logcontext.
        """
        return self._verify_objects(
            VerifyJsonRequest.from_event(server_name, event, validity_time)
            for server_name, event, validity_time in server_and_events
        )

    def _verify_objects(
        self, verify_requests: Iterable[VerifyJsonRequest]
    ) -> List[defer.Deferred]:

@@ -970,7 +892,7 @@ async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None:
    with PreserveLoggingContext():
        _, key_id, verify_key = await verify_request.key_ready

    json_object = verify_request.get_json_object()
    json_object = verify_request.json_object

    try:
        verify_signed_json(json_object, server_name, verify_key)
@@ -20,7 +20,6 @@ from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple,
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:

@@ -114,9 +113,7 @@ class SpamChecker:

        return True

    async def user_may_create_room_alias(
        self, userid: str, room_alias: RoomAlias
    ) -> bool:
    async def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
        """Checks if a given user may create a room alias

        If this method returns false, the association request will be rejected.
@@ -137,7 +137,11 @@ class FederationBase:
        return deferreds


class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])):
class PduToCheckSig(
    namedtuple(
        "PduToCheckSig", ["pdu", "redacted_pdu_json", "sender_domain", "deferreds"]
    )
):
    pass


@@ -180,6 +184,7 @@ def _check_sigs_on_pdus(
    pdus_to_check = [
        PduToCheckSig(
            pdu=p,
            redacted_pdu_json=prune_event(p).get_pdu_json(),
            sender_domain=get_domain_from_id(p.sender),
            deferreds=[],
        )

@@ -190,12 +195,13 @@ def _check_sigs_on_pdus(
    # (except if it's a 3pid invite, in which case it may be sent by any server)
    pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)]

    more_deferreds = keyring.verify_events_for_server(
    more_deferreds = keyring.verify_json_objects_for_server(
        [
            (
                p.sender_domain,
                p.pdu,
                p.redacted_pdu_json,
                p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
                p.pdu.event_id,
            )
            for p in pdus_to_check_sender
        ]

@@ -224,12 +230,13 @@ def _check_sigs_on_pdus(
        if p.sender_domain != get_domain_from_id(p.pdu.event_id)
    ]

    more_deferreds = keyring.verify_events_for_server(
    more_deferreds = keyring.verify_json_objects_for_server(
        [
            (
                get_domain_from_id(p.pdu.event_id),
                p.pdu,
                p.redacted_pdu_json,
                p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
                p.pdu.event_id,
            )
            for p in pdus_to_check_event_id
        ]
@@ -55,7 +55,6 @@ from synapse.api.room_versions import (
)
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.transport.client import SendJoinResponse
from synapse.logging.context import make_deferred_yieldable, preserve_fn
from synapse.logging.utils import log_function
from synapse.types import JsonDict, get_domain_from_id

@@ -452,28 +451,6 @@ class FederationClient(FederationBase):

        return signed_auth

    def _is_unknown_endpoint(
        self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
    ) -> bool:
        """
        Returns true if the response was due to an endpoint being unimplemented.

        Args:
            e: The error response received from the remote server.
            synapse_error: The above error converted to a SynapseError. This is
                automatically generated if not provided.

        """
        if synapse_error is None:
            synapse_error = e.to_synapse_error()
        # There is no good way to detect an "unknown" endpoint.
        #
        # Dendrite returns a 404 (with no body); synapse returns a 400
        # with M_UNRECOGNISED.
        return e.code == 404 or (
            e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
        )

    async def _try_destination_list(
        self,
        description: str,

@@ -491,9 +468,9 @@ class FederationClient(FederationBase):
            callback: Function to run for each server. Passed a single
                argument: the server_name to try.

                If the callback raises a CodeMessageException with a 300/400 code or
                an UnsupportedRoomVersionError, attempts to perform the operation
                stop immediately and the exception is reraised.
                If the callback raises a CodeMessageException with a 300/400 code,
                attempts to perform the operation stop immediately and the exception is
                reraised.

                Otherwise, if the callback raises an Exception the error is logged and the
                next server tried. Normally the stacktrace is logged but this is

@@ -515,7 +492,8 @@ class FederationClient(FederationBase):
                continue

            try:
                return await callback(destination)
                res = await callback(destination)
                return res
            except InvalidResponseError as e:
                logger.warning("Failed to %s via %s: %s", description, destination, e)
            except UnsupportedRoomVersionError:

@@ -524,15 +502,17 @@ class FederationClient(FederationBase):
                synapse_error = e.to_synapse_error()
                failover = False

                # Failover on an internal server error, or if the destination
                # doesn't implement the endpoint for some reason.
                if 500 <= e.code < 600:
                    failover = True

                elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
                    e, synapse_error
                ):
                    failover = True
                elif failover_on_unknown_endpoint:
                    # there is no good way to detect an "unknown" endpoint. Dendrite
                    # returns a 404 (with no body); synapse returns a 400
                    # with M_UNRECOGNISED.
                    if e.code == 404 or (
                        e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED
                    ):
                        failover = True

                if not failover:
                    raise synapse_error from e
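Both versions of this hunk implement the same failover loop: try each destination in turn, fail over on a 5xx or (optionally) an unknown-endpoint response, and re-raise anything that looks like a deliberate rejection. A condensed sketch of that control flow, with a stand-in exception type:

import logging
from typing import Awaitable, Callable, Iterable, TypeVar

logger = logging.getLogger(__name__)
T = TypeVar("T")


class HttpResponseError(Exception):
    def __init__(self, code: int, errcode: str = "M_UNKNOWN"):
        super().__init__(f"{code} {errcode}")
        self.code = code
        self.errcode = errcode


def _looks_like_unknown_endpoint(e: HttpResponseError) -> bool:
    # Dendrite: 404 with no body; Synapse: 400 with M_UNRECOGNIZED.
    return e.code == 404 or (e.code == 400 and e.errcode == "M_UNRECOGNIZED")


async def try_destination_list(
    description: str,
    destinations: Iterable[str],
    callback: Callable[[str], Awaitable[T]],
    failover_on_unknown_endpoint: bool = False,
) -> T:
    for destination in destinations:
        try:
            return await callback(destination)
        except HttpResponseError as e:
            failover = 500 <= e.code < 600 or (
                failover_on_unknown_endpoint and _looks_like_unknown_endpoint(e)
            )
            if not failover:
                raise
            logger.warning("Failed to %s via %s: %s", description, destination, e)
    raise RuntimeError("No servers were reachable")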
@@ -590,8 +570,9 @@ class FederationClient(FederationBase):
            UnsupportedRoomVersionError: if remote responds with
                a room version we don't understand.

            SynapseError: if the chosen remote server returns a 300/400 code, or
                no servers successfully handle the request.
            SynapseError: if the chosen remote server returns a 300/400 code.

            RuntimeError: if no servers were reachable.
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:

@@ -661,15 +642,25 @@ class FederationClient(FederationBase):
            ``auth_chain``.

        Raises:
            SynapseError: if the chosen remote server returns a 300/400 code, or
                no servers successfully handle the request.
            SynapseError: if the chosen remote server returns a 300/400 code.

            RuntimeError: if no servers were reachable.
        """

        async def send_request(destination) -> Dict[str, Any]:
            response = await self._do_send_join(room_version, destination, pdu)
            content = await self._do_send_join(destination, pdu)

            state = response.state
            auth_chain = response.auth_events
            logger.debug("Got content: %s", content)

            state = [
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("state", [])
            ]

            auth_chain = [
                event_from_pdu_json(p, room_version, outlier=True)
                for p in content.get("auth_chain", [])
            ]

            pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}

@@ -682,7 +673,7 @@ class FederationClient(FederationBase):
            if create_event is None:
                # If the state doesn't have a create event then the room is
                # invalid, and it would fail auth checks anyway.
                raise InvalidResponseError("No create event in state")
                raise SynapseError(400, "No create event in state")

            # the room version should be sane.
            create_room_version = create_event.content.get(

@@ -744,36 +735,41 @@ class FederationClient(FederationBase):

        return await self._try_destination_list("send_join", destinations, send_request)

    async def _do_send_join(
        self, room_version: RoomVersion, destination: str, pdu: EventBase
    ) -> SendJoinResponse:
    async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict:
        time_now = self._clock.time_msec()

        try:
            return await self.transport_layer.send_join_v2(
                room_version=room_version,
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fall back to the v1 endpoint. Otherwise consider it a legitimate error
            # and raise.
            if not self._is_unknown_endpoint(e):
                raise
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, or an
                # unrecognised endpoint error, we assume that the remote understands
                # the v2 invite API and this is a legitimate error.
                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                    raise err
            else:
                raise e.to_synapse_error()

        logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

        return await self.transport_layer.send_join_v1(
            room_version=room_version,
        resp = await self.transport_layer.send_join_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )

        # We expect the v1 API to respond with [200, content], so we only return the
        # content.
        return resp[1]
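The v2-then-v1 dance in `_do_send_join` (and in `send_leave` below) is the same in both versions of this hunk; only the test for "remote doesn't implement v2" differs. The shape of the fallback, as a sketch with hypothetical `call_v2`/`call_v1` coroutines:

async def call_with_v1_fallback(call_v2, call_v1, is_unknown_endpoint):
    """Try the v2 endpoint; fall back to v1 only when the remote clearly
    doesn't implement v2. Any other error is treated as legitimate."""
    try:
        return await call_v2()
    except Exception as e:
        if not is_unknown_endpoint(e):
            raise
    resp = await call_v1()
    # The v1 API responds with `[200, content]`, so unwrap to match v2.
    return resp[1]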
    async def send_invite(
        self,
        destination: str,

@@ -806,11 +802,6 @@ class FederationClient(FederationBase):

        Returns:
            The event as a dict as returned by the remote server

        Raises:
            SynapseError: if the remote server returns an error or if the server
                only supports the v1 endpoint and a room version other than "1"
                or "2" is requested.
        """
        time_now = self._clock.time_msec()

@@ -826,19 +817,28 @@ class FederationClient(FederationBase):
                },
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fall back to the v1 endpoint if the room uses old-style event IDs.
            # Otherwise consider it a legitimate error and raise.
            err = e.to_synapse_error()
            if self._is_unknown_endpoint(e, err):
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, we
                # assume that the remote understands the v2 invite API and this
                # is a legitimate error.
                if err.errcode != Codes.UNKNOWN:
                    raise err

                # Otherwise, we assume that the remote server doesn't understand
                # the v2 invite API. That's ok provided the room uses old-style event
                # IDs.
                if room_version.event_format != EventFormatVersions.V1:
                    raise SynapseError(
                        400,
                        "User's homeserver does not support this room version",
                        Codes.UNSUPPORTED_ROOM_VERSION,
                    )
            elif e.code in (403, 429):
                raise e.to_synapse_error()
            else:
                raise err
                raise

        # Didn't work, try v1 API.
        # Note the v1 API returns a tuple of `(200, content)`

@@ -865,8 +865,9 @@ class FederationClient(FederationBase):
            pdu: event to be sent

        Raises:
            SynapseError: if the chosen remote server returns a 300/400 code, or
                no servers successfully handle the request.
            SynapseError if the chosen remote server returns a 300/400 code.

            RuntimeError if no servers were reachable.
        """

        async def send_request(destination: str) -> None:

@@ -888,11 +889,16 @@ class FederationClient(FederationBase):
                content=pdu.get_pdu_json(time_now),
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fall back to the v1 endpoint. Otherwise consider it a legitimate error
            # and raise.
            if not self._is_unknown_endpoint(e):
                raise
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, or an
                # unrecognised endpoint error, we assume that the remote understands
                # the v2 invite API and this is a legitimate error.
                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                    raise err
            else:
                raise e.to_synapse_error()

        logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")


@@ -44,6 +44,7 @@ from synapse.api.errors import (
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json

@@ -864,6 +865,14 @@ class FederationHandlerRegistry:
        # EDU received.
        self._edu_type_to_instance = {}  # type: Dict[str, List[str]]

        # A rate limiter for incoming room key requests per origin.
        self._room_key_request_rate_limiter = Ratelimiter(
            store=hs.get_datastore(),
            clock=self.clock,
            rate_hz=self.config.rc_key_requests.per_second,
            burst_count=self.config.rc_key_requests.burst_count,
        )

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ) -> None:

@@ -917,6 +926,16 @@ class FederationHandlerRegistry:
        if not self.config.use_presence and edu_type == EduTypes.Presence:
            return

        # If the incoming room key requests from a particular origin are over
        # the limit, drop them.
        if (
            edu_type == EduTypes.RoomKeyRequest
            and not await self._room_key_request_rate_limiter.can_do_action(
                None, origin
            )
        ):
            return

        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
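The drop-on-ratelimit check above gates room key request EDUs per origin. The mechanics behind Synapse's `Ratelimiter` are essentially a token bucket; a toy, synchronous stand-in to illustrate the rate_hz/burst_count semantics:

import time
from collections import defaultdict


class TokenBucket:
    """Toy stand-in for Synapse's Ratelimiter: allow `burst_count`
    actions immediately, refilling at `rate_hz` tokens per second."""

    def __init__(self, rate_hz: float, burst_count: int):
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        self._state = defaultdict(lambda: (float(burst_count), time.monotonic()))

    def can_do_action(self, key: str) -> bool:
        tokens, last = self._state[key]
        now = time.monotonic()
        tokens = min(self.burst_count, tokens + (now - last) * self.rate_hz)
        if tokens < 1:
            self._state[key] = (tokens, now)
            return False
        self._state[key] = (tokens - 1, now)
        return True


limiter = TokenBucket(rate_hz=1.0, burst_count=5)
if not limiter.can_do_action("remote.example.org"):
    pass  # drop the EDU, mirroring the hunk above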
@@ -14,19 +14,26 @@

import abc
import logging
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple
from typing import (
    TYPE_CHECKING,
    Collection,
    Dict,
    Hashable,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
)

from prometheus_client import Counter

from twisted.internet import defer

import synapse.metrics
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import (
    LaterGauge,
    event_processing_loop_counter,

@@ -255,15 +262,27 @@ class FederationSender(AbstractFederationSender):
                if not events and next_token >= self._last_poked_id:
                    break

                async def handle_event(event: EventBase) -> None:
                async def get_destinations_for_event(
                    event: EventBase,
                ) -> Collection[str]:
                    """Computes the destinations to which this event must be sent.

                    This returns an empty tuple when there are no destinations to send to,
                    or if this event is not from this homeserver and this homeserver is
                    not sending it on behalf of another server.

                    Will also filter out destinations which this sender is not responsible for,
                    if multiple federation senders exist.
                    """

                    # Only send events for this server.
                    send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                    is_mine = self.is_mine_id(event.sender)
                    if not is_mine and send_on_behalf_of is None:
                        return
                        return ()

                    if not event.internal_metadata.should_proactively_send():
                        return
                        return ()

                    destinations = None  # type: Optional[Set[str]]
                    if not event.prev_event_ids():

@@ -298,7 +317,7 @@ class FederationSender(AbstractFederationSender):
                            "Failed to calculate hosts in room for event: %s",
                            event.event_id,
                        )
                        return
                        return ()

                    destinations = {
                        d

@@ -308,17 +327,15 @@ class FederationSender(AbstractFederationSender):
                        )
                    }

                    destinations.discard(self.server_name)

                    if send_on_behalf_of is not None:
                        # If we are sending the event on behalf of another server
                        # then it already has the event and there is no reason to
                        # send the event to it.
                        destinations.discard(send_on_behalf_of)

                    logger.debug("Sending %s to %r", event, destinations)

                    if destinations:
                        await self._send_pdu(event, destinations)

                        now = self.clock.time_msec()
                        ts = await self.store.get_received_ts(event.event_id)

@@ -326,24 +343,29 @@ class FederationSender(AbstractFederationSender):
                            "federation_sender"
                        ).observe((now - ts) / 1000)

                async def handle_room_events(events: Iterable[EventBase]) -> None:
                    with Measure(self.clock, "handle_room_events"):
                        for event in events:
                            await handle_event(event)
                        return destinations
                    return ()

                events_by_room = {}  # type: Dict[str, List[EventBase]]
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)
                async def get_federatable_events_and_destinations(
                    events: Iterable[EventBase],
                ) -> List[Tuple[EventBase, Collection[str]]]:
                    with Measure(self.clock, "get_destinations_for_events"):
                        # Fetch federation destinations per event,
                        # skip if get_destinations_for_event returns an empty collection,
                        # return list of event->destinations pairs.
                        return [
                            (event, dests)
                            for (event, dests) in [
                                (event, await get_destinations_for_event(event))
                                for event in events
                            ]
                            if dests
                        ]

                await make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(handle_room_events, evs)
                            for evs in events_by_room.values()
                        ],
                        consumeErrors=True,
                    )
                )
                events_and_dests = await get_federatable_events_and_destinations(events)

                # Send corresponding events to each destination queue
                await self._distribute_events(events_and_dests)

                await self.store.update_federation_out_pos("events", next_token)

@@ -361,7 +383,7 @@ class FederationSender(AbstractFederationSender):
                    events_processed_counter.inc(len(events))

                    event_processing_loop_room_count.labels("federation_sender").inc(
                        len(events_by_room)
                        len({event.room_id for event in events})
                    )

                event_processing_loop_counter.labels("federation_sender").inc()

@@ -373,34 +395,53 @@ class FederationSender(AbstractFederationSender):
        finally:
            self._is_processing = False

    async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
        # We loop through all destinations to see whether we already have
        # a transaction in progress. If we do, stick it in the pending_pdus
        # table and we'll get back to it later.
    async def _distribute_events(
        self,
        events_and_dests: Iterable[Tuple[EventBase, Collection[str]]],
    ) -> None:
        """Distribute events to the respective per_destination queues.

        destinations = set(destinations)
        destinations.discard(self.server_name)
        logger.debug("Sending to: %s", str(destinations))
        Also persists last-seen per-room stream_ordering to 'destination_rooms'.

        if not destinations:
            return
        Args:
            events_and_dests: A list of tuples, which are (event: EventBase, destinations: Collection[str]).
                Every event is paired with its intended destinations (in federation).
        """
        # Tuples of room_id + destination to their max-seen stream_ordering
        room_with_dest_stream_ordering = {}  # type: Dict[Tuple[str, str], int]

        sent_pdus_destination_dist_total.inc(len(destinations))
        sent_pdus_destination_dist_count.inc()
        # List of events to send to each destination
        events_by_dest = {}  # type: Dict[str, List[EventBase]]

        assert pdu.internal_metadata.stream_ordering
        # For each event-destinations pair...
        for event, destinations in events_and_dests:

        # track the fact that we have a PDU for these destinations,
        # to allow us to perform catch-up later on if the remote is unreachable
        # for a while.
        await self.store.store_destination_rooms_entries(
            destinations,
            pdu.room_id,
            pdu.internal_metadata.stream_ordering,
            # (we got this from the database, it's filled)
            assert event.internal_metadata.stream_ordering

            sent_pdus_destination_dist_total.inc(len(destinations))
            sent_pdus_destination_dist_count.inc()

            # ...iterate over those destinations...
            for destination in destinations:
                # ...update their stream-ordering...
                room_with_dest_stream_ordering[(event.room_id, destination)] = max(
                    event.internal_metadata.stream_ordering,
                    room_with_dest_stream_ordering.get((event.room_id, destination), 0),
                )

                # ...and add the event to each destination queue.
                events_by_dest.setdefault(destination, []).append(event)

        # Bulk-store destination_rooms stream_ids
        await self.store.bulk_store_destination_rooms_entries(
            room_with_dest_stream_ordering
        )

        for destination in destinations:
            self._get_per_destination_queue(destination).send_pdu(pdu)
        for destination, pdus in events_by_dest.items():
            logger.debug("Sending %d pdus to %s", len(pdus), destination)

            self._get_per_destination_queue(destination).send_pdus(pdus)
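`_distribute_events` does two pieces of bookkeeping in one loop: bucket each event into per-destination lists, and track the highest stream ordering per (room, destination) pair. A self-contained sketch of just that data shuffling, with events reduced to plain tuples:

from typing import Dict, Iterable, List, Tuple

Event = Tuple[str, str, int]  # (event_id, room_id, stream_ordering)


def bucket_by_destination(
    events_and_dests: Iterable[Tuple[Event, Iterable[str]]],
) -> Tuple[Dict[str, List[Event]], Dict[Tuple[str, str], int]]:
    events_by_dest: Dict[str, List[Event]] = {}
    max_stream_ordering: Dict[Tuple[str, str], int] = {}
    for event, destinations in events_and_dests:
        _, room_id, stream_ordering = event
        for destination in destinations:
            key = (room_id, destination)
            # Keep the max-seen stream ordering per (room, destination)...
            max_stream_ordering[key] = max(
                stream_ordering, max_stream_ordering.get(key, 0)
            )
            # ...and queue the event for that destination.
            events_by_dest.setdefault(destination, []).append(event)
    return events_by_dest, max_stream_ordering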
    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """Send a RR to any other servers in the room

@@ -28,7 +28,6 @@ from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process

@@ -155,19 +154,22 @@ class PerDestinationQueue:
            + len(self._pending_edus_keyed)
        )

    def send_pdu(self, pdu: EventBase) -> None:
        """Add a PDU to the queue, and start the transmission loop if necessary
    def send_pdus(self, pdus: Iterable[EventBase]) -> None:
        """Add PDUs to the queue, and start the transmission loop if necessary

        Args:
            pdu: pdu to send
            pdus: pdus to send
        """
        if not self._catching_up or self._last_successful_stream_ordering is None:
            # only enqueue the PDU if we are not catching up (False) or do not
            # yet know if we have anything to catch up (None)
            self._pending_pdus.append(pdu)
            self._pending_pdus.extend(pdus)
        else:
            assert pdu.internal_metadata.stream_ordering
            self._catchup_last_skipped = pdu.internal_metadata.stream_ordering
            self._catchup_last_skipped = max(
                pdu.internal_metadata.stream_ordering
                for pdu in pdus
                if pdu.internal_metadata.stream_ordering is not None
            )

        self.attempt_new_transaction()

@@ -575,14 +577,6 @@ class PerDestinationQueue:
            for content in contents
        ]

        if edus:
            issue9533_logger.debug(
                "Sending %i to-device messages to %s, up to stream id %i",
                len(edus),
                self._destination,
                stream_id,
            )

        return (edus, stream_id)

    def _start_catching_up(self) -> None:

@@ -17,29 +17,18 @@ import logging
import urllib
from typing import Any, Dict, List, Optional

import attr
import ijson

from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
    FEDERATION_UNSTABLE_PREFIX,
    FEDERATION_V1_PREFIX,
    FEDERATION_V2_PREFIX,
)
from synapse.events import EventBase, make_event_from_dict
from synapse.http.matrixfederationclient import ByteParser
from synapse.logging.utils import log_function
from synapse.types import JsonDict

logger = logging.getLogger(__name__)

# Send join responses can be huge, so we set a separate limit here. The response
# is parsed in a streaming manner, which helps alleviate the issue of memory
# usage a bit.
MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024


class TransportLayerClient:
    """Sends federation HTTP requests to other servers"""

@@ -251,38 +240,21 @@ class TransportLayerClient:
        return content

    @log_function
    async def send_join_v1(
        self,
        room_version,
        destination,
        room_id,
        event_id,
        content,
    ) -> "SendJoinResponse":
    async def send_join_v1(self, destination, room_id, event_id, content):
        path = _create_v1_path("/send_join/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination,
            path=path,
            data=content,
            parser=SendJoinParser(room_version, v1_api=True),
            max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
            destination=destination, path=path, data=content
        )

        return response

    @log_function
    async def send_join_v2(
        self, room_version, destination, room_id, event_id, content
    ) -> "SendJoinResponse":
    async def send_join_v2(self, destination, room_id, event_id, content):
        path = _create_v2_path("/send_join/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination,
            path=path,
            data=content,
            parser=SendJoinParser(room_version, v1_api=False),
            max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
            destination=destination, path=path, data=content
        )

        return response

@@ -1023,7 +995,6 @@ class TransportLayerClient:
                returned per space
            exclude_rooms: a list of any rooms we can skip
        """
        # TODO When switching to the stable endpoint, use GET instead of POST.
        path = _create_path(
            FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id
        )

@@ -1081,59 +1052,3 @@ def _create_v2_path(path, *args):
        str
    """
    return _create_path(FEDERATION_V2_PREFIX, path, *args)


@attr.s(slots=True, auto_attribs=True)
class SendJoinResponse:
    """The parsed response of a `/send_join` request."""

    auth_events: List[EventBase]
    state: List[EventBase]


@ijson.coroutine
def _event_list_parser(room_version: RoomVersion, events: List[EventBase]):
    """Helper function for use with `ijson.items_coro` to parse an array of
    events and add them to the given list.
    """

    while True:
        obj = yield
        event = make_event_from_dict(obj, room_version)
        events.append(event)


class SendJoinParser(ByteParser[SendJoinResponse]):
    """A parser for the response to `/send_join` requests.

    Args:
        room_version: The version of the room.
        v1_api: Whether the response is in the v1 format.
    """

    CONTENT_TYPE = "application/json"

    def __init__(self, room_version: RoomVersion, v1_api: bool):
        self._response = SendJoinResponse([], [])

        # The V1 API has the shape of `[200, {...}]`, which we handle by
        # prefixing with `item.*`.
        prefix = "item." if v1_api else ""

        self._coro_state = ijson.items_coro(
            _event_list_parser(room_version, self._response.state),
            prefix + "state.item",
        )
        self._coro_auth = ijson.items_coro(
            _event_list_parser(room_version, self._response.auth_events),
            prefix + "auth_chain.item",
        )

    def write(self, data: bytes) -> int:
        self._coro_state.send(data)
        self._coro_auth.send(data)

        return len(data)

    def finish(self) -> SendJoinResponse:
        return self._response
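For context on `SendJoinParser`: `ijson.items_coro` gives a push-mode parser that can be fed bytes as they arrive off the wire, which is what keeps memory bounded for huge `/send_join` responses. A minimal sketch of the same mechanism, without Synapse's event types:

import ijson

state_objects = []


@ijson.coroutine
def _collector(into):
    while True:
        into.append((yield))


coro = ijson.items_coro(_collector(state_objects), "state.item")

# Feed the response body in arbitrary chunks, as it arrives:
coro.send(b'{"state": [{"type": "m.room.create"}, ')
coro.send(b'{"type": "m.room.member"}]}')
coro.close()

print(state_objects)  # [{'type': 'm.room.create'}, {'type': 'm.room.member'}]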
@@ -160,7 +160,7 @@ class Authenticator:
        # If we get a valid signed request from the other side, it's probably
        # alive
        retry_timings = await self.store.get_destination_retry_timings(origin)
        if retry_timings and retry_timings.retry_last_ts:
        if retry_timings and retry_timings["retry_last_ts"]:
            run_in_background(self._reset_retry_timings, origin)

        return origin

@@ -1376,32 +1376,6 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
    PATH = "/spaces/(?P<room_id>[^/]*)"

    async def on_GET(
        self,
        origin: str,
        content: JsonDict,
        query: Mapping[bytes, Sequence[bytes]],
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
        max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")

        exclude_rooms = []
        if b"exclude_rooms" in query:
            try:
                exclude_rooms = [
                    room_id.decode("ascii") for room_id in query[b"exclude_rooms"]
                ]
            except Exception:
                raise SynapseError(
                    400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM
                )

        return 200, await self.handler.federation_space_summary(
            origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )

    # TODO When switching to the stable endpoint, remove the POST handler.
    async def on_POST(
        self,
        origin: str,

@@ -1428,7 +1402,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
        )

        return 200, await self.handler.federation_space_summary(
            origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
            room_id, suggested_only, max_rooms_per_space, exclude_rooms
        )


@@ -15,9 +15,12 @@
import email.mime.multipart
import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING, List, Optional, Tuple

from synapse.api.errors import StoreError, SynapseError
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils

@@ -33,11 +36,9 @@ class AccountValidityHandler:
        self.hs = hs
        self.config = hs.config
        self.store = self.hs.get_datastore()
        self.send_email_handler = self.hs.get_send_email_handler()
        self.sendmail = self.hs.get_sendmail()
        self.clock = self.hs.get_clock()

        self._app_name = self.hs.config.email_app_name

        self._account_validity_enabled = (
            hs.config.account_validity.account_validity_enabled
        )

@@ -62,10 +63,23 @@ class AccountValidityHandler:
            self._template_text = (
                hs.config.account_validity.account_validity_template_text
            )
            self._renew_email_subject = (
            account_validity_renew_email_subject = (
                hs.config.account_validity.account_validity_renew_email_subject
            )

            try:
                app_name = hs.config.email_app_name

                self._subject = account_validity_renew_email_subject % {"app": app_name}

                self._from_string = hs.config.email_notif_from % {"app": app_name}
            except Exception:
                # If substitution failed, fall back to the bare strings.
                self._subject = account_validity_renew_email_subject
                self._from_string = hs.config.email_notif_from

            self._raw_from = email.utils.parseaddr(self._from_string)[1]

            # Check which renewal emails need to be sent, and send them, every 30 min.
            if hs.config.run_background_tasks:
                self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

@@ -145,17 +159,38 @@ class AccountValidityHandler:
        }

        html_text = self._template_html.render(**template_vars)
        html_part = MIMEText(html_text, "html", "utf8")

        plain_text = self._template_text.render(**template_vars)
        text_part = MIMEText(plain_text, "plain", "utf8")

        for address in addresses:
            raw_to = email.utils.parseaddr(address)[1]

            await self.send_email_handler.send_email(
                email_address=raw_to,
                subject=self._renew_email_subject,
                app_name=self._app_name,
                html=html_text,
                text=plain_text,
            multipart_msg = MIMEMultipart("alternative")
            multipart_msg["Subject"] = self._subject
            multipart_msg["From"] = self._from_string
            multipart_msg["To"] = address
            multipart_msg["Date"] = email.utils.formatdate()
            multipart_msg["Message-ID"] = email.utils.make_msgid()
            multipart_msg.attach(text_part)
            multipart_msg.attach(html_part)

            logger.info("Sending renewal email to %s", address)

            await make_deferred_yieldable(
                self.sendmail(
                    self.hs.config.email_smtp_host,
                    self._raw_from,
                    raw_to,
                    multipart_msg.as_string().encode("utf8"),
                    reactor=self.hs.get_reactor(),
                    port=self.hs.config.email_smtp_port,
                    requireAuthentication=self.hs.config.email_smtp_user is not None,
                    username=self.hs.config.email_smtp_user,
                    password=self.hs.config.email_smtp_pass,
                    requireTransportSecurity=self.hs.config.require_transport_security,
                )
            )

        await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True)
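The MIME assembly restored by this hunk is standard-library only. A stripped-down sketch of the same construction, with all addresses and strings as placeholder arguments:

import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def build_renewal_email(subject: str, from_addr: str, to_addr: str,
                        plain_text: str, html_text: str) -> bytes:
    msg = MIMEMultipart("alternative")
    msg["Subject"] = subject
    msg["From"] = from_addr
    msg["To"] = to_addr
    msg["Date"] = email.utils.formatdate()
    msg["Message-ID"] = email.utils.make_msgid()
    # Attach the plain part first: clients prefer the last alternative
    # they can render, so the HTML part goes last.
    msg.attach(MIMEText(plain_text, "plain", "utf8"))
    msg.attach(MIMEText(html_text, "html", "utf8"))
    return msg.as_string().encode("utf8")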
@@ -17,7 +17,6 @@ import logging
import time
import unicodedata
import urllib.parse
from binascii import crc32
from typing import (
    TYPE_CHECKING,
    Any,

@@ -35,7 +34,6 @@ from typing import (
import attr
import bcrypt
import pymacaroons
import unpaddedbase64

from twisted.web.server import Request

@@ -68,7 +66,6 @@ from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email

if TYPE_CHECKING:

@@ -811,12 +808,10 @@ class AuthHandler(BaseHandler):
            logger.info(
                "Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry
            )
            target_user_id_obj = UserID.from_string(puppets_user_id)
        else:
            logger.info(
                "Logging in user %s on device %s%s", user_id, device_id, fmt_expiry
            )
            target_user_id_obj = UserID.from_string(user_id)

        if (
            not is_appservice_ghost

@@ -824,7 +819,7 @@ class AuthHandler(BaseHandler):
        ):
            await self.auth.check_auth_blocking(user_id)

        access_token = self.generate_access_token(target_user_id_obj)
        access_token = self.macaroon_gen.generate_access_token(user_id)
        await self.store.add_access_token_to_user(
            user_id=user_id,
            token=access_token,

@@ -1197,19 +1192,6 @@ class AuthHandler(BaseHandler):
                return None
            return user_id

    def generate_access_token(self, for_user: UserID) -> str:
        """Generates an opaque string, for use as an access token"""

        # we use the following format for access tokens:
        # syt_<base64 local part>_<random string>_<base62 crc check>

        b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
        random_string = stringutils.random_string(20)
        base = f"syt_{b64local}_{random_string}"

        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        return f"{base}_{crc}"
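The removed `generate_access_token` documents its own format: `syt_<base64 local part>_<random string>_<base62 crc check>`. A self-contained approximation using only the standard library (the base62 alphabet ordering and the random-string alphabet are assumptions; Synapse's helpers may differ in detail):

import secrets
import string
import zlib
from base64 import b64encode

_B62 = string.digits + string.ascii_lowercase + string.ascii_uppercase  # assumed ordering


def _base62_encode(num: int, minwidth: int = 1) -> str:
    digits = ""
    while num:
        num, rem = divmod(num, 62)
        digits = _B62[rem] + digits
    return digits.rjust(minwidth, "0")


def generate_access_token(localpart: str) -> str:
    # Unpadded base64 of the localpart, as in the removed method.
    b64local = b64encode(localpart.encode("utf-8")).decode("ascii").rstrip("=")
    random_string = "".join(
        secrets.choice(string.ascii_letters + string.digits) for _ in range(20)
    )
    base = f"syt_{b64local}_{random_string}"
    crc = _base62_encode(zlib.crc32(base.encode("ascii")), minwidth=6)
    return f"{base}_{crc}"


print(generate_access_token("alice"))  # e.g. syt_YWxpY2U_<random>_<crc>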
    async def validate_short_term_login_token(
        self, login_token: str
    ) -> LoginTokenAttributes:

@@ -1603,7 +1585,10 @@ class MacaroonGenerator:

    hs = attr.ib()

    def generate_guest_access_token(self, user_id: str) -> str:
    def generate_access_token(
        self, user_id: str, extra_caveats: Optional[List[str]] = None
    ) -> str:
        extra_caveats = extra_caveats or []
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = access")
        # Include a nonce, to make sure that each login gets a different

@@ -1611,7 +1596,8 @@ class MacaroonGenerator:
        macaroon.add_first_party_caveat(
            "nonce = %s" % (stringutils.random_string_with_symbols(16),)
        )
        macaroon.add_first_party_caveat("guest = true")
        for caveat in extra_caveats:
            macaroon.add_first_party_caveat(caveat)
        return macaroon.serialize()

    def generate_short_term_login_token(
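The macaroon side of this change is all first-party caveats. Roughly how the generator above assembles a token, sketched directly against `pymacaroons` (the location/identifier values are illustrative; only the caveat strings mirror the hunk):

import pymacaroons


def make_access_macaroon(server_name: str, user_id: str, secret_key: str,
                         extra_caveats=None) -> str:
    macaroon = pymacaroons.Macaroon(
        location=server_name, identifier="key", key=secret_key
    )
    macaroon.add_first_party_caveat(f"user_id = {user_id}")
    macaroon.add_first_party_caveat("type = access")
    for caveat in extra_caveats or []:
        macaroon.add_first_party_caveat(caveat)
    return macaroon.serialize()


token = make_access_macaroon("example.org", "@alice:example.org", "sekrit",
                             extra_caveats=["guest = true"])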
@@ -15,7 +15,7 @@
import logging
from typing import TYPE_CHECKING, Any, Dict

from synapse.api.constants import ToDeviceEventTypes
from synapse.api.constants import EduTypes
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background

@@ -79,8 +79,6 @@ class DeviceMessageHandler:
            ReplicationUserDevicesResyncRestServlet.make_client(hs)
        )

        # a rate limiter for room key requests. The keys are
        # (sending_user_id, sending_device_id).
        self._ratelimiter = Ratelimiter(
            store=self.store,
            clock=hs.get_clock(),

@@ -102,25 +100,12 @@ class DeviceMessageHandler:
        for user_id, by_device in content["messages"].items():
            # we use UserID.from_string to catch invalid user ids
            if not self.is_mine(UserID.from_string(user_id)):
                logger.warning("To-device message to non-local user %s", user_id)
                logger.warning("Request for keys for non-local user %s", user_id)
                raise SynapseError(400, "Not a user here")

            if not by_device:
                continue

            # Ratelimit key requests by the sending user.
            if message_type == ToDeviceEventTypes.RoomKeyRequest:
                allowed, _ = await self._ratelimiter.can_do_action(
                    None, (sender_user_id, None)
                )
                if not allowed:
                    logger.info(
                        "Dropping room_key_request from %s to %s due to rate limit",
                        sender_user_id,
                        user_id,
                    )
                    continue

            messages_by_device = {
                device_id: {
                    "content": message_content,

@@ -207,19 +192,13 @@ class DeviceMessageHandler:
        for user_id, by_device in messages.items():
            # Ratelimit local cross-user key requests by the sending device.
            if (
                message_type == ToDeviceEventTypes.RoomKeyRequest
                message_type == EduTypes.RoomKeyRequest
                and user_id != sender_user_id
            ):
                allowed, _ = await self._ratelimiter.can_do_action(
                and await self._ratelimiter.can_do_action(
                    requester, (sender_user_id, requester.device_id)
                )
                if not allowed:
                    logger.info(
                        "Dropping room_key_request from %s to %s due to rate limit",
                        sender_user_id,
                        user_id,
                    )
                    continue
            ):
                continue

            # we use UserID.from_string to catch invalid user ids
            if self.is_mine(UserID.from_string(user_id)):
@@ -14,7 +14,7 @@

import logging
import string
from typing import TYPE_CHECKING, Iterable, List, Optional
from typing import Iterable, List, Optional

from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (

@@ -27,19 +27,15 @@ from synapse.api.errors import (
    SynapseError,
)
from synapse.appservice import ApplicationService
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id
from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id

from ._base import BaseHandler

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class DirectoryHandler(BaseHandler):
    def __init__(self, hs: "HomeServer"):
    def __init__(self, hs):
        super().__init__(hs)

        self.state = hs.get_state_handler()

@@ -64,7 +60,7 @@ class DirectoryHandler(BaseHandler):
        room_id: str,
        servers: Optional[Iterable[str]] = None,
        creator: Optional[str] = None,
    ) -> None:
    ):
        # general association creation for both human users and app services

        for wchar in string.whitespace:

@@ -78,7 +74,7 @@ class DirectoryHandler(BaseHandler):
        # TODO(erikj): Add transactions.
        # TODO(erikj): Check if there is a current association.
        if not servers:
            users = await self.store.get_users_in_room(room_id)
            users = await self.state.get_current_users_in_room(room_id)
            servers = {get_domain_from_id(u) for u in users}

        if not servers:

@@ -108,9 +104,8 @@ class DirectoryHandler(BaseHandler):
        """

        user_id = requester.user.to_string()
        room_alias_str = room_alias.to_string()

        if len(room_alias_str) > MAX_ALIAS_LENGTH:
        if len(room_alias.to_string()) > MAX_ALIAS_LENGTH:
            raise SynapseError(
                400,
                "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH,

@@ -119,7 +114,7 @@ class DirectoryHandler(BaseHandler):

        service = requester.app_service
        if service:
            if not service.is_interested_in_alias(room_alias_str):
            if not service.is_interested_in_alias(room_alias.to_string()):
                raise SynapseError(
                    400,
                    "This application service has not reserved this kind of alias.",

@@ -143,7 +138,7 @@ class DirectoryHandler(BaseHandler):
                raise AuthError(403, "This user is not permitted to create this alias")

            if not self.config.is_alias_creation_allowed(
                user_id, room_id, room_alias_str
                user_id, room_id, room_alias.to_string()
            ):
                # Let's just return a generic message, as there may be all sorts of
                # reasons why we said no. TODO: Allow configurable error messages

@@ -216,7 +211,7 @@ class DirectoryHandler(BaseHandler):

    async def delete_appservice_association(
        self, service: ApplicationService, room_alias: RoomAlias
    ) -> None:
    ):
        if not service.is_interested_in_alias(room_alias.to_string()):
            raise SynapseError(
                400,

@@ -225,7 +220,7 @@ class DirectoryHandler(BaseHandler):
            )
        await self._delete_association(room_alias)

    async def _delete_association(self, room_alias: RoomAlias) -> str:
    async def _delete_association(self, room_alias: RoomAlias):
        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room alias must be local")

@@ -233,19 +228,17 @@ class DirectoryHandler(BaseHandler):

        return room_id

    async def get_association(self, room_alias: RoomAlias) -> JsonDict:
    async def get_association(self, room_alias: RoomAlias):
        room_id = None
        if self.hs.is_mine(room_alias):
            result = await self.get_association_from_room_alias(
                room_alias
            )  # type: Optional[RoomAliasMapping]
            result = await self.get_association_from_room_alias(room_alias)

            if result:
                room_id = result.room_id
                servers = result.servers
        else:
            try:
                fed_result = await self.federation.make_query(
                result = await self.federation.make_query(
                    destination=room_alias.domain,
                    query_type="directory",
                    args={"room_alias": room_alias.to_string()},

@@ -255,13 +248,13 @@ class DirectoryHandler(BaseHandler):
            except CodeMessageException as e:
                logging.warning("Error retrieving alias")
                if e.code == 404:
                    fed_result = None
                    result = None
                else:
                    raise

            if fed_result and "room_id" in fed_result and "servers" in fed_result:
                room_id = fed_result["room_id"]
                servers = fed_result["servers"]
            if result and "room_id" in result and "servers" in result:
                room_id = result["room_id"]
                servers = result["servers"]

        if not room_id:
            raise SynapseError(

@@ -270,7 +263,7 @@ class DirectoryHandler(BaseHandler):
                Codes.NOT_FOUND,
            )

        users = await self.store.get_users_in_room(room_id)
        users = await self.state.get_current_users_in_room(room_id)
        extra_servers = {get_domain_from_id(u) for u in users}
        servers = set(extra_servers) | set(servers)
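`get_domain_from_id`, used throughout this hunk to turn room members into candidate servers, just splits a Matrix ID at its first colon. A sketch of that helper plus the server-set derivation:

from typing import Iterable, Set


def get_domain_from_id(string_id: str) -> str:
    # "@alice:example.org" -> "example.org"
    idx = string_id.find(":")
    if idx == -1:
        raise ValueError("Invalid ID: %r" % (string_id,))
    return string_id[idx + 1:]


def servers_for_members(users: Iterable[str]) -> Set[str]:
    return {get_domain_from_id(u) for u in users}


print(servers_for_members(["@alice:example.org", "@bob:matrix.org"]))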
@@ -282,7 +275,7 @@ class DirectoryHandler(BaseHandler):

        return {"room_id": room_id, "servers": servers}

    async def on_directory_query(self, args: JsonDict) -> JsonDict:
    async def on_directory_query(self, args):
        room_alias = RoomAlias.from_string(args["room_alias"])
        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room Alias is not hosted on this homeserver")

@@ -300,7 +293,7 @@ class DirectoryHandler(BaseHandler):

    async def _update_canonical_alias(
        self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias
    ) -> None:
    ):
        """
        Send an updated canonical alias event if the removed alias was set as
        the canonical alias or listed in the alt_aliases field.

@@ -351,9 +344,7 @@ class DirectoryHandler(BaseHandler):
            ratelimit=False,
        )

    async def get_association_from_room_alias(
        self, room_alias: RoomAlias
    ) -> Optional[RoomAliasMapping]:
    async def get_association_from_room_alias(self, room_alias: RoomAlias):
        result = await self.store.get_association_from_room_alias(room_alias)
        if not result:
            # Query AS to see if it exists

@@ -381,7 +372,7 @@ class DirectoryHandler(BaseHandler):
            # either no interested services, or no service with an exclusive lock
            return True

    async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool:
    async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str):
        """Determine whether a user can delete an alias.

        One of the following must be true:

@@ -403,13 +394,14 @@ class DirectoryHandler(BaseHandler):
        if not room_id:
            return False

        return await self.auth.check_can_change_room_list(
        res = await self.auth.check_can_change_room_list(
            room_id, UserID.from_string(user_id)
        )
        return res

    async def edit_published_room_list(
        self, requester: Requester, room_id: str, visibility: str
    ) -> None:
    ):
        """Edit the entry of the room in the published room list.

        requester

@@ -477,7 +469,7 @@ class DirectoryHandler(BaseHandler):

    async def edit_published_appservice_room_list(
        self, appservice_id: str, network_id: str, room_id: str, visibility: str
    ) -> None:
    ):
        """Add or remove a room from the appservice/network specific public
        room list.

@@ -507,4 +499,5 @@ class DirectoryHandler(BaseHandler):
            room_id, requester.user.to_string()
        )

        return await self.store.get_aliases_for_room(room_id)
        aliases = await self.store.get_aliases_for_room(room_id)
        return aliases
@@ -11,12 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Collection, Optional
from typing import TYPE_CHECKING

from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError
from synapse.api.constants import EventTypes, JoinRules
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.types import StateMap

if TYPE_CHECKING:
@@ -31,104 +29,46 @@ class EventAuthHandler:
    def __init__(self, hs: "HomeServer"):
        self._store = hs.get_datastore()

    async def check_restricted_join_rules(
        self,
        state_ids: StateMap[str],
        room_version: RoomVersion,
        user_id: str,
        prev_member_event: Optional[EventBase],
    ) -> None:
    async def can_join_without_invite(
        self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
    ) -> bool:
        """
        Check whether a user can join a room without an invite due to restricted join rules.
        Check whether a user can join a room without an invite.

        When joining a room with restricted join rules (as defined in MSC3083),
        the membership of spaces must be checked during a room join.
        the membership of spaces must be checked during join.

        Args:
            state_ids: The state of the room as it currently is.
            room_version: The room version of the room being joined.
            user_id: The user joining the room.
            prev_member_event: The current membership event for this user.

        Raises:
            AuthError if the user cannot join the room.
        """
        # If the member is invited or currently joined, then nothing to do.
        if prev_member_event and (
            prev_member_event.membership in (Membership.JOIN, Membership.INVITE)
        ):
            return

        # This is not a room with a restricted join rule, so we don't need to do the
        # restricted room specific checks.
        #
        # Note: We'll be applying the standard join rule checks later, which will
        # catch the cases of e.g. trying to join private rooms without an invite.
        if not await self.has_restricted_join_rules(state_ids, room_version):
            return

        # Get the spaces which allow access to this room and check if the user is
        # in any of them.
        allowed_spaces = await self.get_spaces_that_allow_join(state_ids)
        if not await self.is_user_in_rooms(allowed_spaces, user_id):
            raise AuthError(
                403,
                "You do not belong to any of the required spaces to join this room.",
            )

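The removed check_restricted_join_rules ties the pieces together: members who are already joined or invited pass immediately, unrestricted rooms defer to the ordinary join-rule checks, and everyone else must share a room with one of the allowed spaces. A hedged sketch with plain data in place of Synapse's event objects (all names below are illustrative, and PermissionError stands in for AuthError):

from typing import Optional, Set

JOIN, INVITE = "join", "invite"


def check_restricted_join(
    membership: Optional[str],
    room_is_restricted: bool,
    allowed_spaces: Set[str],
    joined_rooms: Set[str],
) -> None:
    # Already joined or invited: nothing extra to enforce here.
    if membership in (JOIN, INVITE):
        return
    # Not a restricted room: the ordinary join-rule checks (run elsewhere)
    # still catch e.g. joining a private room without an invite.
    if not room_is_restricted:
        return
    # Restricted room: require membership in at least one allowed space.
    if not allowed_spaces & joined_rooms:
        raise PermissionError("You do not belong to any of the required spaces.")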
    async def has_restricted_join_rules(
        self, state_ids: StateMap[str], room_version: RoomVersion
    ) -> bool:
        """
        Return if the room has the proper join rules set for access via spaces.

        Args:
            state_ids: The state of the room as it currently is.
            room_version: The room version of the room to query.

        Returns:
            True if the proper room version and join rules are set for restricted access.
            True if the user can join the room, false otherwise.
        """
        # This only applies to room versions which support the new join rule.
        if not room_version.msc3083_join_rules:
            return False
            return True

        # If there's no join rule, then it defaults to invite (so this doesn't apply).
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
        if not join_rules_event_id:
            return False

        # If the join rule is not restricted, this doesn't apply.
        join_rules_event = await self._store.get_event(join_rules_event_id)
        return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED

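For reference, a join-rules lookup like the one above operates on a state map keyed by (event type, state key). The shapes below, including the experimental "allow"/"space" keys, mirror the MSC3083 draft as this diff uses it; the concrete IDs are made up:

state_ids = {("m.room.join_rules", ""): "$join_rules_event_id"}
event_content_by_id = {
    "$join_rules_event_id": {
        "join_rule": "restricted",
        "allow": [{"space": "!space:example.org", "via": ["example.org"]}],
    }
}


def room_is_restricted(state_ids: dict, events: dict) -> bool:
    event_id = state_ids.get(("m.room.join_rules", ""))
    if event_id is None:
        # No join-rules event: the room is effectively invite-only.
        return False
    return events[event_id].get("join_rule") == "restricted"


assert room_is_restricted(state_ids, event_content_by_id)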
    async def get_spaces_that_allow_join(
        self, state_ids: StateMap[str]
    ) -> Collection[str]:
        """
        Generate a list of spaces which allow access to a room.

        Args:
            state_ids: The state of the room as it currently is.

        Returns:
            A collection of spaces which provide membership to the room.
        """
        # If there's no join rule, then it defaults to invite (so this doesn't apply).
        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
        if not join_rules_event_id:
            return ()
            return True

        # If the join rule is not restricted, this doesn't apply.
        join_rules_event = await self._store.get_event(join_rules_event_id)
        if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
            return True

        # If allowed is of the wrong form, then only allow invited users.
        allowed_spaces = join_rules_event.content.get("allow", [])
        if not isinstance(allowed_spaces, list):
            return ()
            return False

        # Get the list of joined rooms and see if there's an overlap.
        joined_rooms = await self._store.get_rooms_for_user(user_id)

        # Pull out the other room IDs, invalid data gets filtered.
        result = []
        for space in allowed_spaces:
            if not isinstance(space, dict):
                continue
@@ -137,31 +77,10 @@ class EventAuthHandler:
            if not isinstance(space_id, str):
                continue

            result.append(space_id)

        return result

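A standalone version of the defensive "allow" parsing above: any entry that is not a dict carrying a string room ID is dropped, so malformed data can only narrow access, never widen it. The "space" key follows the experimental MSC3083 shape assumed by this diff:

from typing import Any, List


def spaces_that_allow_join(allow: Any) -> List[str]:
    if not isinstance(allow, list):
        # Wrong shape: behave as if nobody is allowed.
        return []
    result = []
    for entry in allow:
        if not isinstance(entry, dict):
            continue
        space_id = entry.get("space")
        if isinstance(space_id, str):
            result.append(space_id)
    return result


assert spaces_that_allow_join([{"space": "!s:ex.org"}, "junk", {}]) == ["!s:ex.org"]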
    async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool:
        """
        Check whether a user is a member of any of the provided rooms.

        Args:
            room_ids: The rooms to check for membership.
            user_id: The user to check.

        Returns:
            True if the user is in any of the rooms, false otherwise.
        """
        if not room_ids:
            return False

        # Get the list of joined rooms and see if there's an overlap.
        joined_rooms = await self._store.get_rooms_for_user(user_id)

        # Check each room and see if the user is in it.
        for room_id in room_ids:
            if room_id in joined_rooms:
            # The user was joined to one of the spaces specified, they can join
            # this room!
            if space_id in joined_rooms:
                return True

        # The user was not in any of the rooms.
        # The user was not in any of the required spaces.
        return False

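The loop in is_user_in_rooms is an any-overlap test; with sets it collapses to an intersection check. A sketch (not how Synapse actually stores memberships):

from typing import Set


def is_user_in_rooms(room_ids: Set[str], joined_rooms: Set[str]) -> bool:
    # True if the user is joined to at least one of the given rooms.
    return bool(room_ids & joined_rooms)


assert is_user_in_rooms({"!a:ex.org"}, {"!a:ex.org", "!b:ex.org"})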
@@ -103,7 +103,7 @@ class EventStreamHandler(BaseHandler):
            # Send down presence.
            if event.state_key == auth_user_id:
                # Send down presence for everyone in the room.
                users = await self.store.get_users_in_room(
                users = await self.state.get_current_users_in_room(
                    event.room_id
                )  # type: Iterable[str]
            else:

Some files were not shown because too many files have changed in this diff.