Compare commits

4 Commits

Author          SHA1          Message                                                         Date
Andrew Morgan   7c1fcb68a6    Remove any content pulling                                      2019-11-08 15:10:51 +00:00
Andrew Morgan   4def763144    Pull data, but don't use it                                     2019-11-08 14:58:10 +00:00
Andrew Morgan   ce95330771    Fix unpacking                                                   2019-11-08 14:55:00 +00:00
Andrew Morgan   6e282bc343    Remove content from being sent for account data rdata stream   2019-11-08 11:44:22 +00:00
407 changed files with 9970 additions and 18343 deletions

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -e
set -ex
if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
echo "Not merging forward, as this is a release branch"
@@ -18,8 +18,6 @@ else
GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
fi
echo "--- merge_base_branch $GITBASE"
# Show what we are before
git --no-pager show -s

View File

@@ -1,21 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the script to connect to the postgresql database that will be available in the
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"
signing_key_path: "/src/.buildkite/test.signing.key"
report_stats: false
database:
  name: "psycopg2"
  args:
    user: postgres
    host: postgres
    password: postgres
    database: synapse

# Suppress the key server warning.
trusted_key_servers:
  - server_name: "matrix.org"
    suppress_key_server_warning: true

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage.engines import create_engine
logger = logging.getLogger("create_postgres_db")
if __name__ == "__main__":
# Create a PostgresEngine.
db_engine = create_engine({"name": "psycopg2", "args": {}})
# Connect to postgres to create the base database.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = db_engine.module.connect(
user="postgres", host="postgres", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
cur.execute("CREATE DATABASE synapse;")
cur.close()
db_conn.close()
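
As an aside, the same effect can be had from a shell without this helper script; the host and credentials below simply mirror the ones hard-coded above:

```
# Create the empty "synapse" database that the port script will write into.
# Assumes the same postgres host/user/password as create_postgres_db.py.
PGPASSWORD=postgres psql -h postgres -U postgres -c "CREATE DATABASE synapse;"
```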

View File

@@ -1,36 +0,0 @@
#!/bin/bash
#
# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
# with additional dependencies needed for the test (such as coverage or the PostgreSQL
# driver), updates the schema of the test SQLite database and runs background updates on it,
# creates an empty test database in PostgreSQL, then runs the 'synapse_port_db' script to
# test porting the SQLite database to the PostgreSQL database (with coverage).
set -xe
cd `dirname $0`/../..
echo "--- Install dependencies"
# Install dependencies for this test.
pip install psycopg2 coverage coverage-enable-subprocess
# Install Synapse itself. This won't update any libraries.
pip install -e .
echo "--- Generate the signing key"
# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
echo "--- Prepare the databases"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
# Create the PostgreSQL database.
./.buildkite/scripts/create_postgres_db.py
echo "+++ Run synapse_port_db"
# Run the script
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
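
As a hedged aside (not part of the committed script): since the run above is recorded with `coverage` and `coverage-enable-subprocess` is installed, a local invocation could be followed by something like this to inspect the results:

```
# Merge coverage data from the main process and any subprocesses, then summarise it.
coverage combine
coverage report
```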

View File

@@ -1,18 +0,0 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
# schema and run background updates on it.
server_name: "localhost:8800"
signing_key_path: "/src/.buildkite/test.signing.key"
report_stats: false
database:
  name: "sqlite3"
  args:
    database: ".buildkite/test_db.db"

# Suppress the key server warning.
trusted_key_servers:
  - server_name: "matrix.org"
    suppress_key_server_warning: true

Binary file not shown.

View File

@@ -28,14 +28,3 @@ User sees updates to presence from other users in the incremental sync.
Gapped incremental syncs include all state changes
Old members are included in gappy incr LL sync if they start speaking
# new failures as of https://github.com/matrix-org/sytest/pull/732
Device list doesn't change if remote server is down
Remote servers cannot set power levels in rooms without existing powerlevels
Remote servers should reject attempts by non-creators to set the power levels
# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
Server correctly handles incoming m.device_list_update
# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state

View File

@@ -1,8 +1,7 @@
### Pull Request Checklist
<!-- Please read CONTRIBUTING.md before submitting your pull request -->
<!-- Please read CONTRIBUTING.rst before submitting your pull request -->
* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)

View File

@@ -46,6 +46,3 @@ Joseph Weston <joseph at weston.cloud>
Benjamin Saunders <ben.e.saunders at gmail dot com>
* Documentation improvements
Werner Sembach <werner.sembach at fau dot de>
* Automatically remove a group/community when it is empty

View File

@@ -1,325 +1,3 @@
Synapse 1.8.0 (2020-01-09)
==========================
Bugfixes
--------
- Fix `GET` request on `/_synapse/admin/v2/users` endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6563](https://github.com/matrix-org/synapse/issues/6563))
- Fix incorrect signing of responses from the key server implementation. ([\#6657](https://github.com/matrix-org/synapse/issues/6657))
Synapse 1.8.0rc1 (2020-01-07)
=============================
Features
--------
- Add v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)). ([\#6349](https://github.com/matrix-org/synapse/issues/6349))
- Add a develop script to generate full SQL schemas. ([\#6394](https://github.com/matrix-org/synapse/issues/6394))
- Add custom SAML username mapping functionality through an external provider plugin. ([\#6411](https://github.com/matrix-org/synapse/issues/6411))
- Automatically delete empty groups/communities. ([\#6453](https://github.com/matrix-org/synapse/issues/6453))
- Add option `limit_profile_requests_to_users_who_share_rooms` to prevent requirement of a local user sharing a room with another user to query their profile information. ([\#6523](https://github.com/matrix-org/synapse/issues/6523))
- Add an `export_signing_key` script to extract the public part of signing keys when rotating them. ([\#6546](https://github.com/matrix-org/synapse/issues/6546))
- Add experimental config option to specify multiple databases. ([\#6580](https://github.com/matrix-org/synapse/issues/6580))
- Raise an error if someone tries to use the `log_file` config option. ([\#6626](https://github.com/matrix-org/synapse/issues/6626))
Bugfixes
--------
- Prevent redacted events from being returned during message search. ([\#6377](https://github.com/matrix-org/synapse/issues/6377), [\#6522](https://github.com/matrix-org/synapse/issues/6522))
- Prevent error on trying to search a upgraded room when the server is not in the predecessor room. ([\#6385](https://github.com/matrix-org/synapse/issues/6385))
- Improve performance of looking up cross-signing keys. ([\#6486](https://github.com/matrix-org/synapse/issues/6486))
- Fix race which occasionally caused deleted devices to reappear. ([\#6514](https://github.com/matrix-org/synapse/issues/6514))
- Fix missing row in `device_max_stream_id` that could cause unable to decrypt errors after server restart. ([\#6555](https://github.com/matrix-org/synapse/issues/6555))
- Fix a bug which meant that we did not send systemd notifications on startup if acme was enabled. ([\#6571](https://github.com/matrix-org/synapse/issues/6571))
- Fix exception when fetching the `matrix.org:ed25519:auto` key. ([\#6625](https://github.com/matrix-org/synapse/issues/6625))
- Fix bug where a moderator upgraded a room and became an admin in the new room. ([\#6633](https://github.com/matrix-org/synapse/issues/6633))
- Fix an error which was thrown by the `PresenceHandler` `_on_shutdown` handler. ([\#6640](https://github.com/matrix-org/synapse/issues/6640))
- Fix exceptions in the synchrotron worker log when events are rejected. ([\#6645](https://github.com/matrix-org/synapse/issues/6645))
- Ensure that upgraded rooms are removed from the directory. ([\#6648](https://github.com/matrix-org/synapse/issues/6648))
- Fix a bug causing Synapse not to fetch missing events when it believes it has every event in the room. ([\#6652](https://github.com/matrix-org/synapse/issues/6652))
Improved Documentation
----------------------
- Document the Room Shutdown Admin API. ([\#6541](https://github.com/matrix-org/synapse/issues/6541))
- Reword sections of [docs/federate.md](docs/federate.md) that explained delegation at time of Synapse 1.0 transition. ([\#6601](https://github.com/matrix-org/synapse/issues/6601))
- Added the section 'Configuration' in [docs/turn-howto.md](docs/turn-howto.md). ([\#6614](https://github.com/matrix-org/synapse/issues/6614))
Deprecations and Removals
-------------------------
- Remove redundant code from event authorisation implementation. ([\#6502](https://github.com/matrix-org/synapse/issues/6502))
- Remove unused, undocumented `/_matrix/content` API. ([\#6628](https://github.com/matrix-org/synapse/issues/6628))
Internal Changes
----------------
- Add *experimental* support for multiple physical databases and split out state storage to separate data store. ([\#6245](https://github.com/matrix-org/synapse/issues/6245), [\#6510](https://github.com/matrix-org/synapse/issues/6510), [\#6511](https://github.com/matrix-org/synapse/issues/6511), [\#6513](https://github.com/matrix-org/synapse/issues/6513), [\#6564](https://github.com/matrix-org/synapse/issues/6564), [\#6565](https://github.com/matrix-org/synapse/issues/6565))
- Port sections of code base to async/await. ([\#6496](https://github.com/matrix-org/synapse/issues/6496), [\#6504](https://github.com/matrix-org/synapse/issues/6504), [\#6505](https://github.com/matrix-org/synapse/issues/6505), [\#6517](https://github.com/matrix-org/synapse/issues/6517), [\#6559](https://github.com/matrix-org/synapse/issues/6559), [\#6647](https://github.com/matrix-org/synapse/issues/6647), [\#6653](https://github.com/matrix-org/synapse/issues/6653))
- Remove `SnapshotCache` in favour of `ResponseCache`. ([\#6506](https://github.com/matrix-org/synapse/issues/6506))
- Silence mypy errors for files outside those specified. ([\#6512](https://github.com/matrix-org/synapse/issues/6512))
- Clean up some logging when handling incoming events over federation. ([\#6515](https://github.com/matrix-org/synapse/issues/6515))
- Test more folders against mypy. ([\#6534](https://github.com/matrix-org/synapse/issues/6534))
- Update `mypy` to new version. ([\#6537](https://github.com/matrix-org/synapse/issues/6537))
- Adjust the sytest blacklist for worker mode. ([\#6538](https://github.com/matrix-org/synapse/issues/6538))
- Remove unused `get_pagination_rows` methods from `EventSource` classes. ([\#6557](https://github.com/matrix-org/synapse/issues/6557))
- Clean up logs from the push notifier at startup. ([\#6558](https://github.com/matrix-org/synapse/issues/6558))
- Improve diagnostics on database upgrade failure. ([\#6570](https://github.com/matrix-org/synapse/issues/6570))
- Reduce the reconnect time when worker replication fails, to make it easier to catch up. ([\#6617](https://github.com/matrix-org/synapse/issues/6617))
- Simplify http handling by removing redundant `SynapseRequestFactory`. ([\#6619](https://github.com/matrix-org/synapse/issues/6619))
- Add a workaround for synapse raising exceptions when fetching the notary's own key from the notary. ([\#6620](https://github.com/matrix-org/synapse/issues/6620))
- Automate generation of the sample log config. ([\#6627](https://github.com/matrix-org/synapse/issues/6627))
- Simplify event creation code by removing redundant queries on the `event_reference_hashes` table. ([\#6629](https://github.com/matrix-org/synapse/issues/6629))
- Fix errors when `frozen_dicts` are enabled. ([\#6642](https://github.com/matrix-org/synapse/issues/6642))
Synapse 1.7.3 (2019-12-31)
==========================
This release fixes a long-standing bug in the state resolution algorithm.
Bugfixes
--------
- Fix exceptions caused by state resolution choking on malformed events. ([\#6608](https://github.com/matrix-org/synapse/issues/6608))
Synapse 1.7.2 (2019-12-20)
==========================
This release fixes some regressions introduced in Synapse 1.7.0 and 1.7.1.
Bugfixes
--------
- Fix a regression introduced in Synapse 1.7.1 which caused errors when attempting to backfill rooms over federation. ([\#6576](https://github.com/matrix-org/synapse/issues/6576))
- Fix a bug introduced in Synapse 1.7.0 which caused an error on startup when upgrading from versions before 1.3.0. ([\#6578](https://github.com/matrix-org/synapse/issues/6578))
Synapse 1.7.1 (2019-12-18)
==========================
This release includes several security fixes as well as a fix to a bug exposed by the security fixes. Administrators are encouraged to upgrade as soon as possible.
Security updates
----------------
- Fix a bug which could cause room events to be incorrectly authorized using events from a different room. ([\#6501](https://github.com/matrix-org/synapse/issues/6501), [\#6503](https://github.com/matrix-org/synapse/issues/6503), [\#6521](https://github.com/matrix-org/synapse/issues/6521), [\#6524](https://github.com/matrix-org/synapse/issues/6524), [\#6530](https://github.com/matrix-org/synapse/issues/6530), [\#6531](https://github.com/matrix-org/synapse/issues/6531))
- Fix a bug causing responses to the `/context` client endpoint to not use the pruned version of the event. ([\#6553](https://github.com/matrix-org/synapse/issues/6553))
- Fix a cause of state resets in room versions 2 onwards. ([\#6556](https://github.com/matrix-org/synapse/issues/6556), [\#6560](https://github.com/matrix-org/synapse/issues/6560))
Bugfixes
--------
- Fix a bug which could cause the federation server to incorrectly return errors when handling certain obscure event graphs. ([\#6526](https://github.com/matrix-org/synapse/issues/6526), [\#6527](https://github.com/matrix-org/synapse/issues/6527))
Synapse 1.7.0 (2019-12-13)
==========================
This release changes the default settings so that only local authenticated users can query the server's room directory. See the [upgrade notes](UPGRADE.rst#upgrading-to-v170) for details.
Support for SQLite versions before 3.11 is now deprecated. A future release will refuse to start if used with an SQLite version before 3.11.
Administrators are reminded that SQLite should not be used for production instances. Instructions for migrating to Postgres are available [here](docs/postgres.md). A future release of synapse will, by default, disable federation for servers using SQLite.
No significant changes since 1.7.0rc2.
Synapse 1.7.0rc2 (2019-12-11)
=============================
Bugfixes
--------
- Fix incorrect error message for invalid requests when setting user's avatar URL. ([\#6497](https://github.com/matrix-org/synapse/issues/6497))
- Fix support for SQLite 3.7. ([\#6499](https://github.com/matrix-org/synapse/issues/6499))
- Fix regression where sending email push would not work when using a pusher worker. ([\#6507](https://github.com/matrix-org/synapse/issues/6507), [\#6509](https://github.com/matrix-org/synapse/issues/6509))
Synapse 1.7.0rc1 (2019-12-09)
=============================
Features
--------
- Implement per-room message retention policies. ([\#5815](https://github.com/matrix-org/synapse/issues/5815), [\#6436](https://github.com/matrix-org/synapse/issues/6436))
- Add etag and count fields to key backup endpoints to help clients guess if there are new keys. ([\#5858](https://github.com/matrix-org/synapse/issues/5858))
- Add `/admin/v2/users` endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925))
- Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. ([\#6119](https://github.com/matrix-org/synapse/issues/6119))
- Implement the `/_matrix/federation/unstable/net.atleastfornow/state/<context>` API as drafted in MSC2314. ([\#6176](https://github.com/matrix-org/synapse/issues/6176))
- Configure privacy-preserving settings by default for the room directory. ([\#6355](https://github.com/matrix-org/synapse/issues/6355))
- Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). ([\#6409](https://github.com/matrix-org/synapse/issues/6409))
- Add support for [MSC 2367](https://github.com/matrix-org/matrix-doc/pull/2367), which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434))
Bugfixes
--------
- Transfer non-standard power levels on room upgrade. ([\#6237](https://github.com/matrix-org/synapse/issues/6237))
- Fix error from the Pillow library when uploading RGBA images. ([\#6241](https://github.com/matrix-org/synapse/issues/6241))
- Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests. ([\#6329](https://github.com/matrix-org/synapse/issues/6329))
- Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices. ([\#6332](https://github.com/matrix-org/synapse/issues/6332))
- Prevent account data syncs getting lost across TCP replication. ([\#6333](https://github.com/matrix-org/synapse/issues/6333))
- Fix bug: TypeError in `register_user()` while using LDAP auth module. ([\#6406](https://github.com/matrix-org/synapse/issues/6406))
- Fix an intermittent exception when handling read-receipts. ([\#6408](https://github.com/matrix-org/synapse/issues/6408))
- Fix broken guest registration when there are existing blocks of numeric user IDs. ([\#6420](https://github.com/matrix-org/synapse/issues/6420))
- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))
- Fix error when using synapse_port_db on a vanilla synapse db. ([\#6449](https://github.com/matrix-org/synapse/issues/6449))
- Fix uploading multiple cross signing signatures for the same user. ([\#6451](https://github.com/matrix-org/synapse/issues/6451))
- Fix bug which lead to exceptions being thrown in a loop when a cross-signed device is deleted. ([\#6462](https://github.com/matrix-org/synapse/issues/6462))
- Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process. ([\#6470](https://github.com/matrix-org/synapse/issues/6470))
- Improve sanity-checking when receiving events over federation. ([\#6472](https://github.com/matrix-org/synapse/issues/6472))
- Fix inaccurate per-block Prometheus metrics. ([\#6491](https://github.com/matrix-org/synapse/issues/6491))
- Fix small performance regression for sending invites. ([\#6493](https://github.com/matrix-org/synapse/issues/6493))
- Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. ([\#6494](https://github.com/matrix-org/synapse/issues/6494))
Improved Documentation
----------------------
- Update documentation and variables in user contributed systemd reference file. ([\#6369](https://github.com/matrix-org/synapse/issues/6369), [\#6490](https://github.com/matrix-org/synapse/issues/6490))
- Fix link in the user directory documentation. ([\#6388](https://github.com/matrix-org/synapse/issues/6388))
- Add build instructions to the docker readme. ([\#6390](https://github.com/matrix-org/synapse/issues/6390))
- Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md. ([\#6443](https://github.com/matrix-org/synapse/issues/6443))
- Write some docs for the quarantine_media api. ([\#6458](https://github.com/matrix-org/synapse/issues/6458))
- Convert CONTRIBUTING.rst to markdown (among other small fixes). ([\#6461](https://github.com/matrix-org/synapse/issues/6461))
Deprecations and Removals
-------------------------
- Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925))
- Remove fallback for federation with old servers which lack the /federation/v1/state_ids API. ([\#6488](https://github.com/matrix-org/synapse/issues/6488))
Internal Changes
----------------
- Add benchmarks for structured logging and improve output performance. ([\#6266](https://github.com/matrix-org/synapse/issues/6266))
- Improve the performance of outputting structured logging. ([\#6322](https://github.com/matrix-org/synapse/issues/6322))
- Refactor some code in the event authentication path for clarity. ([\#6343](https://github.com/matrix-org/synapse/issues/6343), [\#6468](https://github.com/matrix-org/synapse/issues/6468), [\#6480](https://github.com/matrix-org/synapse/issues/6480))
- Clean up some unnecessary quotation marks around the codebase. ([\#6362](https://github.com/matrix-org/synapse/issues/6362))
- Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary. ([\#6379](https://github.com/matrix-org/synapse/issues/6379))
- Add a test scenario to make sure room history purges don't break `/messages` in the future. ([\#6392](https://github.com/matrix-org/synapse/issues/6392))
- Clarifications for the email configuration settings. ([\#6423](https://github.com/matrix-org/synapse/issues/6423))
- Add more tests to the blacklist when running in worker mode. ([\#6429](https://github.com/matrix-org/synapse/issues/6429))
- Refactor data store layer to support multiple databases in the future. ([\#6454](https://github.com/matrix-org/synapse/issues/6454), [\#6464](https://github.com/matrix-org/synapse/issues/6464), [\#6469](https://github.com/matrix-org/synapse/issues/6469), [\#6487](https://github.com/matrix-org/synapse/issues/6487))
- Port synapse.rest.client.v1 to async/await. ([\#6482](https://github.com/matrix-org/synapse/issues/6482))
- Port synapse.rest.client.v2_alpha to async/await. ([\#6483](https://github.com/matrix-org/synapse/issues/6483))
- Port SyncHandler to async/await. ([\#6484](https://github.com/matrix-org/synapse/issues/6484))
Synapse 1.6.1 (2019-11-28)
==========================
Security updates
----------------
This release includes a security fix ([\#6426](https://github.com/matrix-org/synapse/issues/6426), below). Administrators are encouraged to upgrade as soon as possible.
Bugfixes
--------
- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426))
- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))
Synapse 1.6.0 (2019-11-26)
==========================
Bugfixes
--------
- Fix phone home stats reporting. ([\#6418](https://github.com/matrix-org/synapse/issues/6418))
Synapse 1.6.0rc2 (2019-11-25)
=============================
Bugfixes
--------
- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
Synapse 1.6.0rc1 (2019-11-20)
=============================
Features
--------
- Add federation support for cross-signing. ([\#5727](https://github.com/matrix-org/synapse/issues/5727))
- Increase default room version from 4 to 5, thereby enforcing server key validity period checks. ([\#6220](https://github.com/matrix-org/synapse/issues/6220))
- Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars. ([\#6238](https://github.com/matrix-org/synapse/issues/6238))
- Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). ([\#6301](https://github.com/matrix-org/synapse/issues/6301), [\#6310](https://github.com/matrix-org/synapse/issues/6310), [\#6340](https://github.com/matrix-org/synapse/issues/6340))
Bugfixes
--------
- Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460. ([\#6213](https://github.com/matrix-org/synapse/issues/6213))
- Remove a room from a server's public rooms list on room upgrade. ([\#6232](https://github.com/matrix-org/synapse/issues/6232), [\#6235](https://github.com/matrix-org/synapse/issues/6235))
- Delete keys from key backup when deleting backup versions. ([\#6253](https://github.com/matrix-org/synapse/issues/6253))
- Make notification of cross-signing signatures work with workers. ([\#6254](https://github.com/matrix-org/synapse/issues/6254))
- Fix exception when remote servers attempt to join a room that they're not allowed to join. ([\#6278](https://github.com/matrix-org/synapse/issues/6278))
- Prevent errors from appearing on Synapse startup if `git` is not installed. ([\#6284](https://github.com/matrix-org/synapse/issues/6284))
- Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306))
- Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
- Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
- Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
- Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
- Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
- Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
- Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause unable to decrypt errors. ([\#6363](https://github.com/matrix-org/synapse/issues/6363))
- Fix permission denied error when trying to generate a config file with the docker image. ([\#6389](https://github.com/matrix-org/synapse/issues/6389))
Improved Documentation
----------------------
- Contributor documentation now mentions script to run linters. ([\#6164](https://github.com/matrix-org/synapse/issues/6164))
- Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate. ([\#6257](https://github.com/matrix-org/synapse/issues/6257))
- Update `INSTALL.md` Email section to talk about `account_threepid_delegates`. ([\#6272](https://github.com/matrix-org/synapse/issues/6272))
- Fix a small typo in `account_threepid_delegates` configuration option. ([\#6273](https://github.com/matrix-org/synapse/issues/6273))
Internal Changes
----------------
- Add a CI job to test the `synapse_port_db` script. ([\#6140](https://github.com/matrix-org/synapse/issues/6140), [\#6276](https://github.com/matrix-org/synapse/issues/6276))
- Convert EventContext to an attrs. ([\#6218](https://github.com/matrix-org/synapse/issues/6218))
- Move `persist_events` out from main data store. ([\#6240](https://github.com/matrix-org/synapse/issues/6240), [\#6300](https://github.com/matrix-org/synapse/issues/6300))
- Reduce verbosity of user/room stats. ([\#6250](https://github.com/matrix-org/synapse/issues/6250))
- Reduce impact of debug logging. ([\#6251](https://github.com/matrix-org/synapse/issues/6251))
- Expose some homeserver functionality to spam checkers. ([\#6259](https://github.com/matrix-org/synapse/issues/6259))
- Change cache descriptors to always return deferreds. ([\#6263](https://github.com/matrix-org/synapse/issues/6263), [\#6291](https://github.com/matrix-org/synapse/issues/6291))
- Fix incorrect comment regarding the functionality of an `if` statement. ([\#6269](https://github.com/matrix-org/synapse/issues/6269))
- Update CI to run `isort` over the `scripts` and `scripts-dev` directories. ([\#6270](https://github.com/matrix-org/synapse/issues/6270))
- Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. ([\#6271](https://github.com/matrix-org/synapse/issues/6271), [\#6314](https://github.com/matrix-org/synapse/issues/6314))
- Port replication http server endpoints to async/await. ([\#6274](https://github.com/matrix-org/synapse/issues/6274))
- Port room rest handlers to async/await. ([\#6275](https://github.com/matrix-org/synapse/issues/6275))
- Remove redundant CLI parameters on CI's `flake8` step. ([\#6277](https://github.com/matrix-org/synapse/issues/6277))
- Port `federation_server.py` to async/await. ([\#6279](https://github.com/matrix-org/synapse/issues/6279))
- Port receipt and read markers to async/wait. ([\#6280](https://github.com/matrix-org/synapse/issues/6280))
- Split out state storage into separate data store. ([\#6294](https://github.com/matrix-org/synapse/issues/6294), [\#6295](https://github.com/matrix-org/synapse/issues/6295))
- Refactor EventContext for clarity. ([\#6298](https://github.com/matrix-org/synapse/issues/6298))
- Update the version of black used to 19.10b0. ([\#6304](https://github.com/matrix-org/synapse/issues/6304))
- Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
- Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
- Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
- Add optional python dependencies and dependant binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
- Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
- Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
- Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
- Add continuous integration for python 3.8. ([\#6341](https://github.com/matrix-org/synapse/issues/6341))
- Correct spacing/case of various instances of the word "homeserver". ([\#6357](https://github.com/matrix-org/synapse/issues/6357))
- Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. ([\#6361](https://github.com/matrix-org/synapse/issues/6361))
Synapse 1.5.1 (2019-11-06)
==========================

View File

@@ -1,4 +1,5 @@
# Contributing code to Matrix
Contributing code to Matrix
===========================
Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
@@ -6,17 +7,18 @@ their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see [LICENSE](LICENSE)).
case, this is almost always Apache Software License v2 (see LICENSE).
## How to contribute
How to contribute
~~~~~~~~~~~~~~~~~
The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull
your changes into our repo.
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)
**The single biggest thing you need to know is: please base your changes on
the develop branch - *not* master.**
the develop branch - /not/ master.**
We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
@@ -28,8 +30,9 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use [Buildkite](https://buildkite.com/matrix-dot-org/synapse) for continuous
integration. If your change breaks the build, this will be shown in GitHub, so
We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
continuous integration. Buildkite builds need to be authorised by a
maintainer. If your change breaks the build, this will be shown in GitHub, so
please keep an eye on the pull request for feedback.
To run unit tests in a local development environment, you can use:
@@ -44,63 +47,41 @@ To run unit tests in a local development environment, you can use:
set up PostgreSQL yourself.
Docker images are available for running the integration tests (SyTest) locally,
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
information.
## Code style
Code style
~~~~~~~~~~
All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
[here](docs/code_style.md).
To facilitate meeting these criteria you can run `scripts-dev/lint.sh`
locally. Since this runs the tools listed in the above document, you'll need
python 3.6 and to install each tool:
```
# Install the dependencies
pip install -U black flake8 isort
# Run the linter script
./scripts-dev/lint.sh
```
**Note that the script does not just test/check, but also reformats code, so you
may wish to ensure any new code is committed first**. By default this script
checks all files and can take some time; if you alter only certain files, you
might wish to specify paths as arguments to reduce the run-time:
```
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.md.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
## Changelog
Changelog
~~~~~~~~~
All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the `changelog.d` directory named
in the format of `PRnumber.type`. The type can be one of the following:
To create a changelog entry, make a new file in the ``changelog.d`` file named
in the format of ``PRnumber.type``. The type can be one of the following:
* `feature`
* `bugfix`
* `docker` (for updates to the Docker image)
* `doc` (for updates to the documentation)
* `removal` (also used for deprecations)
* `misc` (for internal-only changes)
* ``feature``.
* ``bugfix``.
* ``docker`` (for updates to the Docker image).
* ``doc`` (for updates to the documentation).
* ``removal`` (also used for deprecations).
* ``misc`` (for internal-only changes).
The content of the file is your changelog entry, which should be a short
description of your change in the same style as the rest of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
description of your change in the same style as the rest of our `changelog
<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
contain Markdown formatting, and should end with a full stop ('.') for
consistency.
@@ -108,21 +89,20 @@ Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!
For example, a fix in PR #1234 would have its changelog entry in
`changelog.d/1234.bugfix`, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when recieved over federation. Contributed by Jane
Matrix.".
## Debian changelog
Debian changelog
----------------
Changes which affect the debian packaging files (in `debian`) are an
Changes which affect the debian packaging files (in ``debian``) are an
exception.
In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command:
next release. For this, run the following command::
```
dch
```
dch
This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
@@ -134,77 +114,74 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
## Sign off
Sign off
~~~~~~~~
In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
`submitting patches process <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`_, Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix:
the contribution or otherwise have the right to contribute it to Matrix::
```
Developer Certificate of Origin
Version 1.1
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment:
include the line in your commit or pull request comment::
```
Signed-off-by: Your Name <your@email.example.org>
```
Signed-off-by: Your Name <your@email.example.org>
We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.
Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
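
A minimal sketch of setting that configuration once and then signing off a commit (the commit message is a placeholder):

```
# Identity used for the Signed-off-by line.
git config user.name "Your Name"
git config user.email "your@email.example.org"

# -s appends "Signed-off-by: Your Name <your@email.example.org>" to the message.
git commit -s -m "Fix widget handling"
```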
## Conclusion
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!

View File

@@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5, 3.6, 3.7 or 3.8.
- Python 3.5, 3.6, or 3.7
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Synapse is written in Python but some of the libraries it uses are written in
@@ -109,8 +109,8 @@ Installing prerequisites on Ubuntu or Debian:
```
sudo apt-get install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
```
#### ArchLinux
@@ -133,9 +133,9 @@ sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
sudo yum groupinstall "Development Tools"
```
#### macOS
#### Mac OS X
Installing prerequisites on macOS:
Installing prerequisites on Mac OS X:
```
xcode-select --install
@@ -144,14 +144,6 @@ sudo pip install virtualenv
brew install pkg-config libffi
```
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:
```
brew install openssl@1.1
export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/
```
#### OpenSUSE
Installing prerequisites on openSUSE:
@@ -421,18 +413,16 @@ For a more detailed guide to configuring your server for federation, see
## Email
It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
is added to a user's account, and send email notifications to users when they
receive new messages.
It is desirable for Synapse to have the capability to send email. For example,
this is required to support the 'password reset' feature.
To configure an SMTP server for Synapse, modify the configuration section
headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
and `notif_from` fields filled out. You may also need to set `smtp_user`,
`smtp_pass`, and `require_transport_security`.
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.
If email is not configured, password reset, registration and notifications via
email will be disabled.
If Synapse is not configured with an SMTP server, password reset via email will
be disabled by default.
## Registering a user

View File

@@ -393,4 +393,4 @@ something like the following in their logs::
2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>
This is normally caused by a misconfiguration in your reverse-proxy. See
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.
`<docs/reverse_proxy.rst>`_ and double-check that your settings are correct.

View File

@@ -75,23 +75,6 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
Upgrading to v1.7.0
===================
In an attempt to configure Synapse in a privacy preserving way, the default
behaviours of ``allow_public_rooms_without_auth`` and
``allow_public_rooms_over_federation`` have been inverted. This means that by
default, only authenticated users querying the Client/Server API will be able
to query the room directory, and relatedly that the server will not share
room directory information with other servers over federation.
If your installation does not explicitly set these settings one way or the other
and you want either setting to be ``true`` then it will necessary to update
your homeserver configuration file accordingly.
For more details on the surrounding context see our `explainer
<https://matrix.org/blog/2019/11/09/avoiding-unwelcome-visitors-on-private-matrix-servers>`_.
Upgrading to v1.5.0
===================

View File

@@ -78,7 +78,7 @@ class InputOutput(object):
m = re.match("^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
room_name, = m.groups()
self.print_line("%s joining %s" % (self.user, room_name))
self.server.join_room(room_name, self.user, self.user)
# self.print_line("OK.")
@@ -105,7 +105,7 @@ class InputOutput(object):
m = re.match("^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
room_name, = m.groups()
self.print_line("backfill %s" % room_name)
self.server.backfill(room_name)
return

View File

@@ -1,17 +0,0 @@
# Setup Synapse with Systemd
This is a setup for managing synapse with a user contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to accommodate your installation in accordance with the installation
instructions provided in [installation instructions](../../INSTALL.md).
## Setup
1. Under the service section, ensure the `User` variable matches which user
you installed synapse under and wish to run it as.
2. Under the service section, ensure the `WorkingDirectory` variable matches
where you have installed synapse.
3. Under the service section, ensure the `ExecStart` variable matches the
appropriate locations of your installation.
4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
5. Start Synapse: `sudo systemctl start matrix-synapse`
6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`

View File

@@ -4,11 +4,8 @@
# systemctl enable matrix-synapse
# systemctl start matrix-synapse
#
# This assumes that Synapse has been installed by a user named
# synapse.
#
# This assumes that Synapse has been installed in a virtualenv in
# the user's home directory: `/home/synapse/synapse/env`.
# /opt/synapse/env.
#
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.
@@ -25,8 +22,8 @@ Restart=on-abort
User=synapse
Group=nogroup
WorkingDirectory=/home/synapse/synapse
ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
SyslogIdentifier=matrix-synapse
# adjust the cache factor if necessary

View File

@@ -85,9 +85,6 @@ PYTHONPATH="$tmpdir" \
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# build the log config file
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
PYPKG=`basename $SNAKE`

debian/changelog vendored
View File

@@ -1,49 +1,3 @@
matrix-synapse-py3 (1.8.0) stable; urgency=medium
[ Richard van der Hoff ]
* Automate generation of the default log configuration file.
[ Synapse Packaging team ]
* New synapse release 1.8.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 09 Jan 2020 11:39:27 +0000
matrix-synapse-py3 (1.7.3) stable; urgency=medium
* New synapse release 1.7.3.
-- Synapse Packaging team <packages@matrix.org> Tue, 31 Dec 2019 10:45:04 +0000
matrix-synapse-py3 (1.7.2) stable; urgency=medium
* New synapse release 1.7.2.
-- Synapse Packaging team <packages@matrix.org> Fri, 20 Dec 2019 10:56:50 +0000
matrix-synapse-py3 (1.7.1) stable; urgency=medium
* New synapse release 1.7.1.
-- Synapse Packaging team <packages@matrix.org> Wed, 18 Dec 2019 09:37:59 +0000
matrix-synapse-py3 (1.7.0) stable; urgency=medium
* New synapse release 1.7.0.
-- Synapse Packaging team <packages@matrix.org> Fri, 13 Dec 2019 10:19:38 +0000
matrix-synapse-py3 (1.6.1) stable; urgency=medium
* New synapse release 1.6.1.
-- Synapse Packaging team <packages@matrix.org> Thu, 28 Nov 2019 11:10:40 +0000
matrix-synapse-py3 (1.6.0) stable; urgency=medium
* New synapse release 1.6.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2019 12:15:40 +0000
matrix-synapse-py3 (1.5.1) stable; urgency=medium
* New synapse release 1.5.1.

debian/install vendored
View File

@@ -1 +1,2 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/

debian/log.yaml vendored Normal file
View File

@@ -0,0 +1,36 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.logging.context.LoggingContextFilter
    request: ""

handlers:
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: precise
    filename: /var/log/matrix-synapse/homeserver.log
    maxBytes: 104857600
    backupCount: 10
    filters: [context]
    encoding: utf8
  console:
    class: logging.StreamHandler
    formatter: precise
    level: WARN

loggers:
  synapse:
    level: INFO
  synapse.storage.SQL:
    level: INFO

root:
  level: INFO
  handlers: [file, console]

View File

@@ -101,7 +101,7 @@ is suitable for local testing, but for any practical use, you will either need
to use a reverse proxy, or configure Synapse to expose an HTTPS port.
For documentation on using a reverse proxy, see
https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
For more information on enabling TLS support in synapse itself, see
https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
@@ -130,15 +130,3 @@ docker run -it --rm \
This will generate the same configuration file as the legacy mode used, but
will store it in `/data/homeserver.yaml` instead of a temporary location. You
can then use it as shown above at [Running synapse](#running-synapse).
## Building the image
If you need to build the image from a Synapse checkout, use the following `docker
build` command from the repo's root:
```
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
You can choose to build a different docker image by changing the value of the `-f` flag to
point to another Dockerfile.
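
For instance (the alternative Dockerfile path and image tag here are purely hypothetical):

```
docker build -t my-synapse-test -f docker/Dockerfile.custom .
```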

View File

@@ -169,11 +169,11 @@ def run_generate_config(environ, ownership):
# log("running %s" % (args, ))
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
subprocess.check_output(["chown", ownership, data_dir])
args = ["su-exec", ownership] + args
os.execv("/sbin/su-exec", args)
# make sure that synapse has perms to write to the data dir.
subprocess.check_output(["chown", ownership, data_dir])
else:
os.execv("/usr/local/bin/python", args)
@@ -217,9 +217,8 @@ def main(args, environ):
# backwards-compatibility generate-a-config-on-the-fly mode
if "SYNAPSE_CONFIG_PATH" in environ:
error(
"SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
"in `generate` or `migrate_config` mode. To start synapse using a "
"config file, unset the SYNAPSE_SERVER_NAME environment variable."
"SYNAPSE_SERVER_NAME and SYNAPSE_CONFIG_PATH are mutually exclusive "
"except in `generate` or `migrate_config` mode."
)
config_path = "/compiled/homeserver.yaml"

View File

@@ -4,7 +4,7 @@ The captcha mechanism used is Google's ReCaptcha. This requires API keys from Go
## Getting keys
Requires a site/secret key pair from:
Requires a public/private key pair from:
<https://developers.google.com/recaptcha/>
@@ -15,8 +15,8 @@ Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
The keys are a config option on the home server config. If they are not
visible, you can generate them via `--generate-config`. Set the following value:
recaptcha_public_key: YOUR_SITE_KEY
recaptcha_private_key: YOUR_SECRET_KEY
recaptcha_public_key: YOUR_PUBLIC_KEY
recaptcha_private_key: YOUR_PRIVATE_KEY
In addition, you MUST enable captchas via:

View File

@@ -21,20 +21,3 @@ It returns a JSON body like the following:
]
}
```
# Quarantine media in a room
This API 'quarantines' all the media in a room.
The API is:
```
POST /_synapse/admin/v1/quarantine_media/<room_id>
{}
```
Quarantining media means that it is marked as inaccessible by users. It applies
to any local media, and any locally-cached copies of remote media.
The media file itself (and any thumbnails) is not deleted from the server.
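
A hedged example of calling the endpoint with curl; the server name and token are placeholders, and the room ID must be URL-encoded:

```
curl -X POST \
    -H "Authorization: Bearer <admin_access_token>" \
    -d '{}' \
    "https://synapse.example.com/_synapse/admin/v1/quarantine_media/%21someroom%3Aexample.com"
```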

View File

@@ -1,72 +0,0 @@
# Shutdown room API
Shuts down a room: it prevents new joins and automatically moves local users and room aliases
to a new room. The new room will be created with the user specified by the
`new_room_user_id` parameter as room administrator and will contain a message
explaining what happened. Users invited to the new room will have power level
-10 by default, and thus be unable to speak. The old room's power levels will be changed to
disallow any further invites or joins.
The local server will only have the power to move local users and room aliases to
the new room. Users on other servers will be unaffected.
## API
You will need to authenticate with an access token for an admin user.
### URL
`POST /_synapse/admin/v1/shutdown_room/{room_id}`
### URL Parameters
* `room_id` - The ID of the room (e.g `!someroom:example.com`)
### JSON Body Parameters
* `new_room_user_id` - Required. A string representing the user ID of the user that will admin
the new room that all users in the old room will be moved to.
* `room_name` - Optional. A string representing the name of the room that new users will be
invited to.
* `message` - Optional. A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly convey why the
original room was shut down.
If not specified, the default value of `room_name` is "Content Violation
Notification". The default value of `message` is "Sharing illegal content on
this server is not permitted and rooms in violation will be blocked."
### Response Parameters
* `kicked_users` - The number of users that were kicked.
* `failed_to_kick_users` - The number of users that could not be kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.
## Example
Request:
```
POST /_synapse/admin/v1/shutdown_room/!somebadroom%3Aexample.com
{
"new_room_user_id": "@someuser:example.com",
"room_name": "Content Violation Notification",
"message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service."
}
```
Response:
```
{
"kicked_users": 5,
"failed_to_kick_users": 0,
"local_aliases": ["#badroom:example.com", "#evilsaloon:example.com],
"new_room_id": "!newroomid:example.com",
},
```
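As a hedged illustration, the same request could be issued from Python roughly as follows; the homeserver URL and admin access token are placeholders, and the JSON body simply reuses the example above.
```
# Sketch only: shut down a room via the admin API. All values are placeholders.
from urllib.parse import quote
import requests

HOMESERVER = "https://localhost:8448"
ROOM_ID = "!somebadroom:example.com"

resp = requests.post(
    HOMESERVER + "/_synapse/admin/v1/shutdown_room/" + quote(ROOM_ID, safe=""),
    params={"access_token": "ADMIN_ACCESS_TOKEN"},
    json={
        "new_room_user_id": "@someuser:example.com",
        "room_name": "Content Violation Notification",
        "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
    },
)
resp.raise_for_status()
print(resp.json()["new_room_id"])
```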

View File

@@ -1,48 +1,3 @@
List Accounts
=============
This API returns all local user accounts.
The api is::
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
including an ``access_token`` of a server admin.
The parameters ``from`` and ``limit`` are required only for pagination.
By default, a ``limit`` of 100 is used.
The parameter ``user_id`` can be used to select only users with user ids that
contain this value.
The parameter ``guests=false`` can be used to exclude guest users; the
default is to include guest users.
The parameter ``deactivated=true`` can be used to include deactivated users; the
default is to exclude deactivated users.
If the endpoint does not return a ``next_token`` then there are no more users left.
It returns a JSON body like the following:
.. code:: json
{
"users": [
{
"name": "<user_id1>",
"password_hash": "<password_hash1>",
"is_guest": 0,
"admin": 0,
"user_type": null,
"deactivated": 0
}, {
"name": "<user_id2>",
"password_hash": "<password_hash2>",
"is_guest": 0,
"admin": 1,
"user_type": null,
"deactivated": 0
}
],
"next_token": "100"
}
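As an illustration only, the pagination can be driven from Python as in the sketch
below; the homeserver URL and admin access token are placeholders.

.. code:: python

    # Sketch: page through /_synapse/admin/v2/users until no next_token is returned.
    import requests

    homeserver = "https://localhost:8448"
    params = {"access_token": "ADMIN_ACCESS_TOKEN", "from": 0, "limit": 10, "guests": "false"}

    users = []
    while True:
        resp = requests.get(homeserver + "/_synapse/admin/v2/users", params=params)
        resp.raise_for_status()
        body = resp.json()
        users.extend(body["users"])
        if "next_token" not in body:
            break  # no more users left
        params["from"] = body["next_token"]
    print("fetched %d users" % len(users))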
Query Account
=============

View File

@@ -137,7 +137,6 @@ Some guidelines follow:
correctly handles the top-level option being set to `None` (as it
will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
Example:
@@ -156,13 +155,13 @@ Example:
# Settings for the frobber
#
frobber:
# frobbing speed. Defaults to 1.
#
#speed: 10
# frobbing speed. Defaults to 1.
#
#speed: 10
# frobbing distance. Defaults to 1000.
#
#distance: 100
# frobbing distance. Defaults to 1000.
#
#distance: 100
Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config`.

View File

@@ -66,6 +66,10 @@ therefore cannot gain access to the necessary certificate. With .well-known,
federation servers will check for a valid TLS certificate for the delegated
hostname (in our example: ``synapse.example.com``).
.well-known support first appeared in Synapse v0.99.0. To federate with older
servers you may need to additionally configure SRV delegation. Alternatively,
encourage the server admin in question to upgrade :).
### DNS SRV delegation
To use this delegation method, you need to have write access to your
@@ -107,15 +111,29 @@ giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.
This is the common case, although you can add an SRV record or
`.well-known/matrix/server` URI for completeness if you wish.
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.
#### I have created a .well-known URI. Do I also need an SRV record?
#### I have created a .well-known URI. Do I still need an SRV record?
No. You can use either `.well-known` delegation or use an SRV record for delegation. You
do not need to use both to delegate to the same location.
As of Synapse 0.99, Synapse will first check for the existence of a .well-known
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.
That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand .well-known URIs, so if you removed your SRV record
you would no longer be able to federate with them.
It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
earlier will follow the SRV record (and not care about the invalid
certificate). Synapse 0.99 and later will follow the .well-known URI, with the
correct certificate chain.
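To see what other servers will discover for your domain, you can fetch the `.well-known` file yourself. The sketch below assumes the standard `{"m.server": "<delegated host>"}` response format; `example.com` is a placeholder.
```
# Sketch: look up .well-known delegation the way a federating server would,
# before falling back to an SRV record.
import requests

server_name = "example.com"
resp = requests.get("https://%s/.well-known/matrix/server" % server_name, timeout=10)
if resp.ok:
    print("Delegated to:", resp.json().get("m.server"))  # e.g. "synapse.example.com:443"
else:
    print("No .well-known file; an SRV record (or port 8448) would be used instead")
```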
#### Can I manage my own certificates rather than having Synapse renew certificates itself?

View File

@@ -1,77 +0,0 @@
# SAML Mapping Providers
A SAML mapping provider is a Python class (loaded via a Python module) that
works out how to map attributes of a SAML response object to Matrix-specific
user attributes. Details such as user ID localpart, displayname, and even avatar
URLs are all things that can be mapped from talking to an SSO service.
As an example, an SSO service may return the email address
"john.smith@example.com" for a user, whereas Synapse will need to figure out how
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SAML mapping providers come into play.
## Enabling Providers
External mapping providers are provided to Synapse in the form of an external
Python module. Retrieve this module from [PyPi](https://pypi.org) or elsewhere,
then tell Synapse where to look for the handler class by editing the
`saml2_config.user_mapping_provider.module` config option.
`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.
## Building a Custom Mapping Provider
A custom mapping provider must specify the following methods:
* `__init__(self, parsed_config)`
- Arguments:
- `parsed_config` - A configuration object that is the return value of the
`parse_config` method. You should set any configuration options needed by
the module here.
* `saml_response_to_user_attributes(self, saml_response, failures)`
- Arguments:
- `saml_response` - A `saml2.response.AuthnResponse` object to extract user
information from.
- `failures` - An `int` that represents the number of times the returned
mxid localpart mapping has failed. This should be used
to create a deduplicated mxid localpart which should be
returned instead. For example, if this method returns
`john.doe` as the value of `mxid_localpart` in the returned
dict, and that is already taken on the homeserver, this
method will be called again with the same parameters but
with failures=1. The method should then return a different
`mxid_localpart` value, such as `john.doe1`.
- This method must return a dictionary, which will then be used by Synapse
to build a new user. The following keys are allowed:
* `mxid_localpart` - Required. The mxid localpart of the new user.
* `displayname` - The displayname of the new user. If not provided, will default to
the value of `mxid_localpart`.
* `parse_config(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - A `dict` representing the parsed content of the
`saml2_config.user_mapping_provider.config` homeserver config option.
Runs on homeserver startup. Providers should extract any option values
they need here.
- Whatever is returned will be passed back to the user mapping provider module's
`__init__` method during construction.
* `get_saml_attributes(config)`
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - An object resulting from a call to `parse_config`.
- Returns a tuple of two sets. The first set equates to the saml auth
response attributes that are required for the module to function, whereas
the second set consists of those attributes which can be used if available,
but are not necessary.
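Putting those pieces together, a minimal, purely illustrative provider might look like the sketch below. The attribute names `uid` and `displayName`, and the use of `saml_response.ava` from pysaml2, are assumptions about a particular deployment rather than requirements of the interface.
```
# Illustrative sketch of a custom SAML mapping provider implementing the
# methods listed above. Attribute names and saml_response.ava are assumptions.
class MySamlMappingProvider:
    def __init__(self, parsed_config):
        # parsed_config is whatever parse_config returned below
        self.source_attribute = parsed_config["source_attribute"]

    @staticmethod
    def parse_config(config):
        # config is saml2_config.user_mapping_provider.config from homeserver.yaml
        return {"source_attribute": config.get("source_attribute", "uid")}

    @staticmethod
    def get_saml_attributes(config):
        # (required attributes, optional attributes)
        return {config["source_attribute"]}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures):
        # saml_response.ava holds the mapped attributes in pysaml2
        localpart = saml_response.ava[self.source_attribute][0].lower()
        if failures:
            # deduplicate: "john.doe" -> "john.doe1", "john.doe2", ...
            localpart += str(failures)
        displayname = saml_response.ava.get("displayName", [localpart])[0]
        return {"mxid_localpart": localpart, "displayname": displayname}
```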
## Synapse's Default Provider
Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).

View File

@@ -54,23 +54,15 @@ pid_file: DATADIR/homeserver.pid
#
#require_auth_for_profile_requests: true
# Uncomment to require a user to share a room with another user in order
# to retrieve their profile information. Only checked on Client-Server
# requests. Profile requests from other servers should be checked by the
# requesting server. Defaults to 'false'.
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
#
#limit_profile_requests_to_users_who_share_rooms: true
#allow_public_rooms_without_auth: false
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
# If set to 'false', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'true'.
#
#allow_public_rooms_without_auth: true
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
#
#allow_public_rooms_over_federation: true
#allow_public_rooms_over_federation: false
# The default room version for newly created rooms.
#
@@ -80,7 +72,7 @@ pid_file: DATADIR/homeserver.pid
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "5"
#default_room_version: "4"
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@@ -295,7 +287,7 @@ listeners:
# Used by phonehome stats to group together related servers.
#server_context: context
# Resource-constrained homeserver Settings
# Resource-constrained Homeserver Settings
#
# If limit_remote_rooms.enabled is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
@@ -336,69 +328,6 @@ listeners:
#
#user_ips_max_age: 14d
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
# The message retention policies feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true
# Default retention policy. If set, Synapse will apply it to rooms that lack the
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
# matter much because Synapse doesn't take it into account yet.
#
#default_policy:
# min_lifetime: 1d
# max_lifetime: 1y
# Retention policy limits. If set, a user won't be able to send a
# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
# that's not within this range. This is especially useful in closed federations,
# in which server admins can make sure every federating server applies the same
# rules.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
# Server admins can define the settings of the background jobs purging the
# events which lifetime has expired under the 'purge_jobs' section.
#
# If no configuration is provided, a single job will be set up to delete expired
# events in every room daily.
#
# Each job's configuration defines which range of message lifetimes the job
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
# lower than or equal to 3 days. Both the minimum and the maximum value of a
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
# which 'max_lifetime' is lower than or equal to three days.
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
# that purge to be performed by a job that's iterating over every room it knows,
# which would be quite heavy on the server.
#
#purge_jobs:
# - shortest_max_lifetime: 1d
# longest_max_lifetime: 3d
# interval: 5m
# - shortest_max_lifetime: 3d
# longest_max_lifetime: 1y
# interval: 24h
## TLS ##
@@ -692,6 +621,10 @@ media_store_path: "DATADIR/media_store"
# config:
# directory: /mnt/some/other/directory
# Directory where in-progress uploads are stored.
#
uploads_path: "DATADIR/uploads"
# The largest allowed upload size in bytes
#
#max_upload_size: 10M
@@ -810,11 +743,11 @@ media_store_path: "DATADIR/media_store"
## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.
# This homeserver's ReCAPTCHA public key.
# This Home Server's ReCAPTCHA public key.
#
#recaptcha_public_key: "YOUR_PUBLIC_KEY"
# This homeserver's ReCAPTCHA private key.
# This Home Server's ReCAPTCHA private key.
#
#recaptcha_private_key: "YOUR_PRIVATE_KEY"
@@ -1022,7 +955,7 @@ media_store_path: "DATADIR/media_store"
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
account_threepid_delegates:
#email: https://example.com # Delegate email sending to example.com
#email: https://example.com # Delegate email sending to example.org
#msisdn: http://localhost:8090 # Delegate SMS sending to this local process
# Users who register on this homeserver will automatically be joined
@@ -1118,19 +1051,14 @@ metrics_flags:
signing_key_path: "CONFDIR/SERVERNAME.signing.key"
# The keys that the server used to sign messages with but won't use
# to sign new messages.
# to sign new messages. E.g. it has lost its private key
#
old_signing_keys:
# For each key, `key` should be the base64-encoded public key, and
# `expired_ts`should be the time (in milliseconds since the unix epoch) that
# it was last used.
#
# It is possible to build an entry from an old signing.key file using the
# `export_signing_key` script which is provided with synapse.
#
# For example:
#
#"ed25519:id": { key: "base64string", expired_ts: 123456789123 }
#old_signing_keys:
# "ed25519:auto":
# # Base64 encoded public key
# key: "The public part of your old signing key."
# # Millisecond POSIX timestamp when the key expired.
# expired_ts: 123456789123
# How long key response published by this server is valid for.
# Used to set the valid_until_ts in /key/v2 APIs.
@@ -1258,58 +1186,33 @@ saml2_config:
#
#config_path: "CONFDIR/sp_conf.py"
# The lifetime of a SAML session. This defines how long a user has to
# the lifetime of a SAML session. This defines how long a user has to
# complete the authentication process, if allow_unsolicited is unset.
# The default is 5 minutes.
#
#saml_session_lifetime: 5m
# An external module can be provided here as a custom solution to
# mapping attributes returned from a saml provider onto a matrix user.
# The SAML attribute (after mapping via the attribute maps) to use to derive
# the Matrix ID from. 'uid' by default.
#
user_mapping_provider:
# The custom module's class. Uncomment to use a custom module.
#
#module: mapping_provider.SamlMappingProvider
#mxid_source_attribute: displayName
# Custom configuration values for the module. Below options are
# intended for the built-in provider, they should be changed if
# using a custom module. This section will be passed as a Python
# dictionary to the module's `parse_config` method.
#
config:
# The SAML attribute (after mapping via the attribute maps) to use
# to derive the Matrix ID from. 'uid' by default.
#
# Note: This used to be configured by the
# saml2_config.mxid_source_attribute option. If that is still
# defined, its value will be used instead.
#
#mxid_source_attribute: displayName
# The mapping system to use for mapping the saml attribute onto a matrix ID.
# Options include:
# * 'hexencode' (which maps unpermitted characters to '=xx')
# * 'dotreplace' (which replaces unpermitted characters with '.').
# The default is 'hexencode'.
#
#mxid_mapping: dotreplace
# The mapping system to use for mapping the saml attribute onto a
# matrix ID.
#
# Options include:
# * 'hexencode' (which maps unpermitted characters to '=xx')
# * 'dotreplace' (which replaces unpermitted characters with
# '.').
# The default is 'hexencode'.
#
# Note: This used to be configured by the
# saml2_config.mxid_mapping option. If that is still defined, its
# value will be used instead.
#
#mxid_mapping: dotreplace
# In previous versions of synapse, the mapping from SAML attribute to
# MXID was always calculated dynamically rather than stored in a
# table. For backwards- compatibility, we will look for user_ids
# matching such a pattern before creating a new account.
# In previous versions of synapse, the mapping from SAML attribute to MXID was
# always calculated dynamically rather than stored in a table. For backwards-
# compatibility, we will look for user_ids matching such a pattern before
# creating a new account.
#
# This setting controls the SAML attribute which will be used for this
# backwards-compatibility lookup. Typically it should be 'uid', but if
# the attribute maps are changed, it may be necessary to change it.
# backwards-compatibility lookup. Typically it should be 'uid', but if the
# attribute maps are changed, it may be necessary to change it.
#
# The default is 'uid'.
#
@@ -1367,23 +1270,8 @@ password_config:
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: false
#
# # notif_from defines the "From" address to use when sending emails.
# # It must be set if email sending is enabled.
# #
# # The placeholder '%(app)s' will be replaced by the application name,
# # which is normally 'app_name' (below), but may be overridden by the
# # Matrix client application.
# #
# # Note that the placeholder must be written '%(app)s', including the
# # trailing 's'.
# #
# notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
#
# # app_name defines the default value for '%(app)s' in notif_from. It
# # defaults to 'Matrix'.
# #
# #app_name: my_branded_matrix_server
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
#
# # Enable email notifications by default
# #

View File

@@ -1,43 +0,0 @@
# Log configuration for Synapse.
#
# This is a YAML file containing a standard Python logging configuration
# dictionary. See [1] for details on the valid settings.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/homeserver.log
maxBytes: 104857600
backupCount: 10
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
root:
level: INFO
handlers: [file, console]
disable_existing_loggers: false

View File

@@ -199,20 +199,7 @@ client (C):
#### REPLICATE (C)
Asks the server to replicate a given stream. The syntax is:
```
REPLICATE <stream_name> <token>
```
Where `<token>` may be either:
* a numeric stream_id to stream updates since (exclusive)
* `NOW` to stream all subsequent updates.
The `<stream_name>` is the name of a replication stream to subscribe
to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
of streams). It can also be `ALL` to subscribe to all known streams,
in which case the `<token>` must be set to `NOW`.
Asks the server to replicate a given stream
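As a purely illustrative sketch (not Synapse's implementation), a client might compose the command line described above like this; the stream name and token are example values.
```
# Illustrative only: building REPLICATE lines with the syntax shown above.
def replicate_cmd(stream_name, token):
    return "REPLICATE %s %s" % (stream_name, token)

print(replicate_cmd("events", 53423))  # stream "events" updates after id 53423
print(replicate_cmd("ALL", "NOW"))     # subscribe to all streams from now on
```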
#### USER_SYNC (C)

View File

@@ -39,8 +39,6 @@ The TURN daemon `coturn` is available from a variety of sources such as native p
make
make install
### Configuration
1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
lines, with example values, are:

View File

@@ -7,6 +7,7 @@ who are present in a publicly viewable room present on the server.
The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](../synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql)
solution to fix it is to execute the SQL here
https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.

View File

@@ -196,7 +196,7 @@ Handles the media repository. It can handle all endpoints starting with:
/_matrix/media/
... and the following regular expressions matching media-specific administration APIs:
And the following regular expressions matching media-specific administration APIs:
^/_synapse/admin/v1/purge_media_cache$
^/_synapse/admin/v1/room/.*/media$
@@ -206,18 +206,6 @@ You should also set `enable_media_repo: False` in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository.
In the `media_repository` worker configuration file, configure the http listener to
expose the `media` resource. For example:
```yaml
worker_listeners:
- type: http
port: 8085
resources:
- names:
- media
```
Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.client_reader`

View File

@@ -1,11 +1,8 @@
[mypy]
namespace_packages = True
plugins = mypy_zope:plugin
follow_imports = silent
check_untyped_defs = True
show_error_codes = True
show_traceback = True
mypy_path = stubs
namespace_packages=True
plugins=mypy_zope:plugin
follow_imports=skip
mypy_path=stubs
[mypy-zope]
ignore_missing_imports = True

View File

@@ -20,13 +20,11 @@ from concurrent.futures import ThreadPoolExecutor
DISTS = (
"debian:stretch",
"debian:buster",
"debian:bullseye",
"debian:sid",
"ubuntu:xenial",
"ubuntu:bionic",
"ubuntu:cosmic",
"ubuntu:disco",
"ubuntu:eoan",
)
DESC = '''\

View File

@@ -7,22 +7,12 @@ set -e
cd `dirname $0`/..
SAMPLE_CONFIG="docs/sample_config.yaml"
SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml"
check() {
diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1
}
if [ "$1" == "--check" ]; then
diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
exit 1
}
diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || {
echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
exit 1
}
else
./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG"
fi

View File

@@ -27,7 +27,7 @@ class Store(object):
"_store_pdu_reference_hash_txn"
]
_store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
simple_insert_txn = SQLBaseStore.__dict__["simple_insert_txn"]
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
store = Store()

View File

@@ -7,15 +7,7 @@
set -e
if [ $# -ge 1 ]
then
files=$*
else
files="synapse tests scripts-dev scripts"
fi
echo "Linting these locations: $files"
isort -y -rc $files
flake8 $files
python3 -m black $files
isort -y -rc synapse tests scripts-dev scripts
flake8 synapse tests
python3 -m black synapse tests scripts-dev scripts
./scripts-dev/config-lint.sh

View File

@@ -1,184 +0,0 @@
#!/bin/bash
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.
#
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
POSTGRES_HOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"
SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"
POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres"
REQUIRED_DEPS=("matrix-synapse" "psycopg2")
usage() {
echo
echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
echo
echo "-p <postgres_username>"
echo " Username to connect to local postgres instance. The password will be requested"
echo " during script execution."
echo "-c"
echo " CI mode. Enables coverage tracking and prints every command that the script runs."
echo "-o <path>"
echo " Directory to output full schema files to."
echo "-h"
echo " Display this help text."
}
while getopts "p:co:h" opt; do
case $opt in
p)
POSTGRES_USERNAME=$OPTARG
;;
c)
# Print all commands that are being executed
set -x
# Modify required dependencies for coverage
REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess")
COVERAGE=1
;;
o)
command -v realpath > /dev/null || (echo "The -o flag requires the 'realpath' binary to be installed" && exit 1)
OUTPUT_DIR="$(realpath "$OPTARG")"
;;
h)
usage
exit
;;
\?)
echo "ERROR: Invalid option: -$OPTARG" >&2
usage
exit
;;
esac
done
# Check that required dependencies are installed
unsatisfied_requirements=()
for dep in "${REQUIRED_DEPS[@]}"; do
pip show "$dep" --quiet || unsatisfied_requirements+=("$dep")
done
if [ ${#unsatisfied_requirements} -ne 0 ]; then
echo "Please install the following python packages: ${unsatisfied_requirements[*]}"
exit 1
fi
if [ -z "$POSTGRES_USERNAME" ]; then
echo "No postgres username supplied"
usage
exit 1
fi
if [ -z "$OUTPUT_DIR" ]; then
echo "No output directory supplied"
usage
exit 1
fi
# Create the output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"
read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD
echo ""
# Exit immediately if a command fails
set -e
# cd to root of the synapse directory
cd "$(dirname "$0")/.."
# Create temporary SQLite and Postgres homeserver db configs and key file
TMPDIR=$(mktemp -d)
KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
SQLITE_CONFIG=$TMPDIR/sqlite.conf
SQLITE_DB=$TMPDIR/homeserver.db
POSTGRES_CONFIG=$TMPDIR/postgres.conf
# Ensure these files are deleted on script exit
trap 'rm -rf $TMPDIR' EXIT
cat > "$SQLITE_CONFIG" <<EOF
server_name: "test"
signing_key_path: "$KEY_FILE"
macaroon_secret_key: "abcde"
report_stats: false
database:
name: "sqlite3"
args:
database: "$SQLITE_DB"
# Suppress the key server warning.
trusted_key_servers: []
EOF
cat > "$POSTGRES_CONFIG" <<EOF
server_name: "test"
signing_key_path: "$KEY_FILE"
macaroon_secret_key: "abcde"
report_stats: false
database:
name: "psycopg2"
args:
user: "$POSTGRES_USERNAME"
host: "$POSTGRES_HOST"
password: "$POSTGRES_PASSWORD"
database: "$POSTGRES_DB_NAME"
# Suppress the key server warning.
trusted_key_servers: []
EOF
# Generate the server's signing key.
echo "Generating SQLite3 db schema..."
python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
scripts-dev/update_database --database-config "$SQLITE_CONFIG"
# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb $POSTGRES_DB_NAME
echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
if [ -z "$COVERAGE" ]; then
# No coverage needed
scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
else
# Coverage desired
coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
fi
# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
# This needs to be done after synapse_port_db is run
echo "Dropping unwanted db tables..."
SQL="
DROP TABLE schema_version;
DROP TABLE applied_schema_deltas;
DROP TABLE applied_module_schemas;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"
echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"
echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..."
pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE"
echo "Cleaning up temporary Postgres database..."
dropdb $POSTGRES_DB_NAME
echo "Done! Files dumped to: $OUTPUT_DIR"

View File

@@ -1,95 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
import yaml
from twisted.internet import defer, reactor
from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.server import HomeServer
from synapse.storage import DataStore
logger = logging.getLogger("update_database")
class MockHomeserver(HomeServer):
DATASTORE_CLASS = DataStore
def __init__(self, config, **kwargs):
super(MockHomeserver, self).__init__(
config.server_name, reactor=reactor, config=config, **kwargs
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Updates a synapse database to the latest schema and runs background updates"
" on it."
)
)
parser.add_argument("-v", action="store_true")
parser.add_argument(
"--database-config",
type=argparse.FileType("r"),
required=True,
help="A database config file for either a SQLite3 database or a PostgreSQL one.",
)
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
logging.basicConfig(**logging_config)
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
if "database" not in hs_config:
sys.stderr.write("The configuration file must have a 'database' section.\n")
sys.exit(4)
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
# Instantiate and initialise the homeserver object.
hs = MockHomeserver(config)
# Setup instantiates the store within the homeserver object and updates the
# DB.
hs.setup()
store = hs.get_datastore()
@defer.inlineCallbacks
def run_background_updates():
yield store.db.updates.run_background_updates(sleep=False)
# Stop the reactor to exit the script once every background update is run.
reactor.stop()
# Apply all background updates on the database.
reactor.callWhenRunning(
lambda: run_as_background_process("background_updates", run_background_updates)
)
reactor.run()

View File

@@ -1,94 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from typing import Optional
import nacl.signing
from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys
def exit(status: int = 0, message: Optional[str] = None):
if message:
print(message, file=sys.stderr)
sys.exit(status)
def format_plain(public_key: nacl.signing.VerifyKey):
print(
"%s:%s %s"
% (public_key.alg, public_key.version, encode_verify_key_base64(public_key),)
)
def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int):
print(
' "%s:%s": { key: "%s", expired_ts: %i }'
% (
public_key.alg,
public_key.version,
encode_verify_key_base64(public_key),
expiry_ts,
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read",
)
parser.add_argument(
"-x",
action="store_true",
dest="for_config",
help="format the output for inclusion in the old_signing_keys config setting",
)
parser.add_argument(
"--expiry-ts",
type=int,
default=int(time.time() * 1000) + 6*3600000,
help=(
"The expiry time to use for -x, in milliseconds since 1970. The default "
"is (now+6h)."
),
)
args = parser.parse_args()
formatter = (
(lambda k: format_for_config(k, args.expiry_ts))
if args.for_config
else format_plain
)
keys = []
for file in args.key_file:
try:
res = read_signing_keys(file)
except Exception as e:
exit(
status=1,
message="Error reading key from file %s: %s %s"
% (file.name, type(e), e),
)
res = []
for key in res:
formatter(get_verify_key(key))

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from synapse.config.logger import DEFAULT_LOG_CONFIG
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--output-file",
type=argparse.FileType("w"),
default=sys.stdout,
help="File to write the configuration to. Default: stdout",
)
parser.add_argument(
"-f",
"--log-file",
type=str,
default="/var/log/matrix-synapse/homeserver.log",
help="name of the log file",
)
args = parser.parse_args()
args.output_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file))

View File

@@ -72,7 +72,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
# check that the original exists
original_file = src_paths.remote_media_filepath(origin_server, file_id)
if not os.path.exists(original_file):
logger.warning(
logger.warn(
"Original for %s/%s (%s) does not exist",
origin_server,
file_id,

View File

@@ -30,7 +30,6 @@ import yaml
from twisted.enterprise import adbapi
from twisted.internet import defer, reactor
from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
from synapse.logging.context import PreserveLoggingContext
from synapse.storage._base import LoggingTransaction
@@ -48,16 +47,13 @@ from synapse.storage.data_stores.main.media_repository import (
from synapse.storage.data_stores.main.registration import (
RegistrationBackgroundUpdateStore,
)
from synapse.storage.data_stores.main.room import RoomBackgroundUpdateStore
from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
from synapse.storage.data_stores.main.state import MainStateBackgroundUpdateStore
from synapse.storage.data_stores.main.state import StateBackgroundUpdateStore
from synapse.storage.data_stores.main.stats import StatsStore
from synapse.storage.data_stores.main.user_directory import (
UserDirectoryBackgroundUpdateStore,
)
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database, make_conn
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.util import Clock
@@ -135,23 +131,54 @@ class Store(
EventsBackgroundUpdatesStore,
MediaRepositoryBackgroundUpdateStore,
RegistrationBackgroundUpdateStore,
RoomBackgroundUpdateStore,
RoomMemberBackgroundUpdateStore,
SearchBackgroundUpdateStore,
StateBackgroundUpdateStore,
MainStateBackgroundUpdateStore,
UserDirectoryBackgroundUpdateStore,
StatsStore,
):
def __init__(self, db_conn, hs):
super().__init__(db_conn, hs)
self.db_pool = hs.get_db_pool()
@defer.inlineCallbacks
def runInteraction(self, desc, func, *args, **kwargs):
def r(conn):
try:
i = 0
N = 5
while True:
try:
txn = conn.cursor()
return func(
LoggingTransaction(txn, desc, self.database_engine, [], []),
*args,
**kwargs
)
except self.database_engine.module.DatabaseError as e:
if self.database_engine.is_deadlock(e):
logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
if i < N:
i += 1
conn.rollback()
continue
raise
except Exception as e:
logger.debug("[TXN FAIL] {%s} %s", desc, e)
raise
with PreserveLoggingContext():
return (yield self.db_pool.runWithConnection(r))
def execute(self, f, *args, **kwargs):
return self.db.runInteraction(f.__name__, f, *args, **kwargs)
return self.runInteraction(f.__name__, f, *args, **kwargs)
def execute_sql(self, sql, *args):
def r(txn):
txn.execute(sql, args)
return txn.fetchall()
return self.db.runInteraction("execute_sql", r)
return self.runInteraction("execute_sql", r)
def insert_many_txn(self, txn, table, headers, rows):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
@@ -166,24 +193,25 @@ class Store(
logger.exception("Failed to insert: %s", table)
raise
def set_room_is_public(self, room_id, is_public):
raise Exception(
"Attempt to set room_is_public during port_db: database not empty?"
)
class MockHomeserver:
def __init__(self, config):
def __init__(self, config, database_engine, db_conn, db_pool):
self.database_engine = database_engine
self.db_conn = db_conn
self.db_pool = db_pool
self.clock = Clock(reactor)
self.config = config
self.hostname = config.server_name
def get_db_conn(self):
return self.db_conn
def get_db_pool(self):
return self.db_pool
def get_clock(self):
return self.clock
def get_reactor(self):
return reactor
class Porter(object):
def __init__(self, **kwargs):
@@ -193,7 +221,7 @@ class Porter(object):
def setup_table(self, table):
if table in APPEND_ONLY_TABLES:
# It's safe to just carry on inserting.
row = yield self.postgres_store.db.simple_select_one(
row = yield self.postgres_store._simple_select_one(
table="port_from_sqlite3",
keyvalues={"table_name": table},
retcols=("forward_rowid", "backward_rowid"),
@@ -203,14 +231,12 @@ class Porter(object):
total_to_port = None
if row is None:
if table == "sent_transactions":
(
forward_chunk,
already_ported,
total_to_port,
) = yield self._setup_sent_transactions()
forward_chunk, already_ported, total_to_port = (
yield self._setup_sent_transactions()
)
backward_chunk = 0
else:
yield self.postgres_store.db.simple_insert(
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={
"table_name": table,
@@ -240,7 +266,7 @@ class Porter(object):
yield self.postgres_store.execute(delete_all)
yield self.postgres_store.db.simple_insert(
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
)
@@ -294,7 +320,7 @@ class Porter(object):
if table == "user_directory_stream_pos":
# We need to make sure there is a single row, `(X, null)`, as that is
# what synapse expects to be there.
yield self.postgres_store.db.simple_insert(
yield self.postgres_store._simple_insert(
table=table, values={"stream_id": None}
)
self.progress.update(table, table_size) # Mark table as done
@@ -335,9 +361,7 @@ class Porter(object):
return headers, forward_rows, backward_rows
headers, frows, brows = yield self.sqlite_store.db.runInteraction(
"select", r
)
headers, frows, brows = yield self.sqlite_store.runInteraction("select", r)
if frows or brows:
if frows:
@@ -351,7 +375,7 @@ class Porter(object):
def insert(txn):
self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)
self.postgres_store.db.simple_update_one_txn(
self.postgres_store._simple_update_one_txn(
txn,
table="port_from_sqlite3",
keyvalues={"table_name": table},
@@ -390,7 +414,7 @@ class Porter(object):
return headers, rows
headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
headers, rows = yield self.sqlite_store.runInteraction("select", r)
if rows:
forward_chunk = rows[-1][0] + 1
@@ -407,8 +431,8 @@ class Porter(object):
rows_dict = []
for row in rows:
d = dict(zip(headers, row))
if "\0" in d["value"]:
logger.warning("dropping search row %s", d)
if "\0" in d['value']:
logger.warn('dropping search row %s', d)
else:
rows_dict.append(d)
@@ -428,7 +452,7 @@ class Porter(object):
],
)
self.postgres_store.db.simple_update_one_txn(
self.postgres_store._simple_update_one_txn(
txn,
table="port_from_sqlite3",
keyvalues={"table_name": "event_search"},
@@ -447,35 +471,47 @@ class Porter(object):
else:
return
def setup_db(self, db_config: DatabaseConnectionConfig, engine):
db_conn = make_conn(db_config, engine)
prepare_database(db_conn, engine, config=None)
def setup_db(self, db_config, database_engine):
db_conn = database_engine.module.connect(
**{
k: v
for k, v in db_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
prepare_database(db_conn, database_engine, config=None)
db_conn.commit()
return db_conn
@defer.inlineCallbacks
def build_db_store(self, db_config: DatabaseConnectionConfig):
def build_db_store(self, config):
"""Builds and returns a database store using the provided configuration.
Args:
config: The database configuration
config: The database configuration, i.e. a dict following the structure of
the "database" section of Synapse's configuration file.
Returns:
The built Store object.
"""
self.progress.set_state("Preparing %s" % db_config.config["name"])
engine = create_engine(config)
engine = create_engine(db_config.config)
conn = self.setup_db(db_config, engine)
self.progress.set_state("Preparing %s" % config["name"])
conn = self.setup_db(config, engine)
hs = MockHomeserver(self.hs_config)
db_pool = adbapi.ConnectionPool(
config["name"], **config["args"]
)
store = Store(Database(hs, db_config, engine), conn, hs)
hs = MockHomeserver(self.hs_config, engine, conn, db_pool)
yield store.db.runInteraction(
"%s_engine.check_database" % db_config.config["name"],
store = Store(conn, hs)
yield store.runInteraction(
"%s_engine.check_database" % config["name"],
engine.check_database,
)
@@ -484,9 +520,7 @@ class Porter(object):
@defer.inlineCallbacks
def run_background_updates_on_postgres(self):
# Manually apply all background updates on the PostgreSQL database.
postgres_ready = (
yield self.postgres_store.db.updates.has_completed_background_updates()
)
postgres_ready = yield self.postgres_store.has_completed_background_updates()
if not postgres_ready:
# Only say that we're running background updates when there are background
@@ -494,22 +528,18 @@ class Porter(object):
self.progress.set_state("Running background updates on PostgreSQL")
while not postgres_ready:
yield self.postgres_store.db.updates.do_next_background_update(100)
yield self.postgres_store.do_next_background_update(100)
postgres_ready = yield (
self.postgres_store.db.updates.has_completed_background_updates()
self.postgres_store.has_completed_background_updates()
)
@defer.inlineCallbacks
def run(self):
try:
self.sqlite_store = yield self.build_db_store(
DatabaseConnectionConfig("master-sqlite", self.sqlite_config)
)
self.sqlite_store = yield self.build_db_store(self.sqlite_config)
# Check if all background updates are done, abort if not.
updates_complete = (
yield self.sqlite_store.db.updates.has_completed_background_updates()
)
updates_complete = yield self.sqlite_store.has_completed_background_updates()
if not updates_complete:
sys.stderr.write(
"Pending background updates exist in the SQLite3 database."
@@ -519,7 +549,7 @@ class Porter(object):
defer.returnValue(None)
self.postgres_store = yield self.build_db_store(
self.hs_config.get_single_database()
self.hs_config.database_config
)
yield self.run_background_updates_on_postgres()
@@ -550,22 +580,22 @@ class Porter(object):
)
try:
yield self.postgres_store.db.runInteraction("alter_table", alter_table)
yield self.postgres_store.runInteraction("alter_table", alter_table)
except Exception:
# On Error Resume Next
pass
yield self.postgres_store.db.runInteraction(
yield self.postgres_store.runInteraction(
"create_port_table", create_port_table
)
# Step 2. Get tables.
self.progress.set_state("Fetching tables")
sqlite_tables = yield self.sqlite_store.db.simple_select_onecol(
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
)
postgres_tables = yield self.postgres_store.db.simple_select_onecol(
postgres_tables = yield self.postgres_store._simple_select_onecol(
table="information_schema.tables",
keyvalues={},
retcol="distinct table_name",
@@ -617,7 +647,7 @@ class Porter(object):
if isinstance(col, bytes):
return bytearray(col)
elif isinstance(col, string_types) and "\0" in col:
logger.warning(
logger.warn(
"DROPPING ROW: NUL value in table %s col %s: %r",
table,
headers[j],
@@ -655,11 +685,11 @@ class Porter(object):
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
ts_ind = headers.index("ts")
ts_ind = headers.index('ts')
return headers, [r for r in rows if r[ts_ind] < yesterday]
headers, rows = yield self.sqlite_store.db.runInteraction("select", r)
headers, rows = yield self.sqlite_store.runInteraction("select", r)
rows = self._convert_rows("sent_transactions", headers, rows)
@@ -692,7 +722,7 @@ class Porter(object):
next_chunk = yield self.sqlite_store.execute(get_start_id)
next_chunk = max(max_inserted_rowid + 1, next_chunk)
yield self.postgres_store.db.simple_insert(
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={
"table_name": "sent_transactions",
@@ -705,7 +735,7 @@ class Porter(object):
txn.execute(
"SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
)
(size,) = txn.fetchone()
size, = txn.fetchone()
return int(size)
remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
@@ -752,13 +782,10 @@ class Porter(object):
def _setup_state_group_id_seq(self):
def r(txn):
txn.execute("SELECT MAX(id) FROM state_groups")
curr_id = txn.fetchone()[0]
if not curr_id:
return
next_id = curr_id + 1
next_id = txn.fetchone()[0] + 1
txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))
return self.postgres_store.db.runInteraction("setup_state_group_id_seq", r)
return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
##############################################
@@ -839,7 +866,7 @@ class CursesProgress(Progress):
duration = int(now) - int(self.start_time)
minutes, seconds = divmod(duration, 60)
duration_str = "%02dm %02ds" % (minutes, seconds)
duration_str = '%02dm %02ds' % (minutes, seconds)
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
@@ -849,7 +876,7 @@ class CursesProgress(Progress):
left = float(self.total_remaining) / self.total_processed
est_remaining = (int(now) - self.start_time) * left
est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60)
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
else:
est_remaining_str = "Unknown"
status = "Time spent: %s (est. remaining: %s)" % (
@@ -935,7 +962,7 @@ if __name__ == "__main__":
description="A script to port an existing synapse SQLite database to"
" a new PostgreSQL database."
)
parser.add_argument("-v", action="store_true")
parser.add_argument("-v", action='store_true')
parser.add_argument(
"--sqlite-database",
required=True,
@@ -944,12 +971,12 @@ if __name__ == "__main__":
)
parser.add_argument(
"--postgres-config",
type=argparse.FileType("r"),
type=argparse.FileType('r'),
required=True,
help="The database config file for the PostgreSQL database",
)
parser.add_argument(
"--curses", action="store_true", help="display a curses based progress UI"
"--curses", action='store_true', help="display a curses based progress UI"
)
parser.add_argument(
@@ -1025,4 +1052,3 @@ if __name__ == "__main__":
if end_error_exec_info:
exc_type, exc_value, exc_traceback = end_error_exec_info
traceback.print_exception(exc_type, exc_value, exc_traceback)
sys.exit(5)

View File

@@ -20,23 +20,3 @@ parts:
source: .
plugin: python
python-version: python3
python-packages:
- '.[all]'
build-packages:
- libffi-dev
- libturbojpeg0-dev
- libssl-dev
- libxslt1-dev
- libpq-dev
- zlib1g-dev
stage-packages:
- libasn1-8-heimdal
- libgssapi3-heimdal
- libhcrypto4-heimdal
- libheimbase1-heimdal
- libheimntlm0-heimdal
- libhx509-5-heimdal
- libkrb5-26-heimdal
- libldap-2.4-2
- libpq5
- libsasl2-2

View File

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix homeserver.
""" This is a reference implementation of a Matrix home server.
"""
import os
@@ -36,7 +36,7 @@ try:
except ImportError:
pass
__version__ = "1.8.0"
__version__ = "1.5.1"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

View File

@@ -144,8 +144,8 @@ def main():
logging.captureWarnings(True)
parser = argparse.ArgumentParser(
description="Used to register new users with a given homeserver when"
" registration has been disabled. The homeserver must be"
description="Used to register new users with a given home server when"
" registration has been disabled. The home server must be"
" configured with the 'registration_shared_secret' option"
" set."
)
@@ -202,7 +202,7 @@ def main():
"server_url",
default="https://localhost:8448",
nargs="?",
help="URL to use to talk to the homeserver. Defaults to "
help="URL to use to talk to the home server. Defaults to "
" 'https://localhost:8448'.",
)

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import logging
from typing import Dict, Tuple
from six import itervalues
@@ -26,7 +25,13 @@ from twisted.internet import defer
import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, LimitBlockingTypes, Membership, UserTypes
from synapse.api.constants import (
EventTypes,
JoinRules,
LimitBlockingTypes,
Membership,
UserTypes,
)
from synapse.api.errors import (
AuthError,
Codes,
@@ -79,7 +84,7 @@ class Auth(object):
@defer.inlineCallbacks
def check_from_context(self, room_version, event, context, do_sig_check=True):
prev_state_ids = yield context.get_prev_state_ids()
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.compute_auth_events(
event, prev_state_ids, for_verification=True
)
@@ -492,7 +497,7 @@ class Auth(object):
token = self.get_access_token_from_request(request)
service = self.store.get_app_service_by_token(token)
if not service:
logger.warning("Unrecognised appservice access token.")
logger.warn("Unrecognised appservice access token.")
raise InvalidClientTokenError()
request.authenticated_entity = service.sender
return defer.succeed(service)
@@ -508,43 +513,71 @@ class Auth(object):
"""
return self.store.is_server_admin(user)
def compute_auth_events(
self,
event,
current_state_ids: Dict[Tuple[str, str], str],
for_verification: bool = False,
):
"""Given an event and current state return the list of event IDs used
to auth an event.
If `for_verification` is False then only return auth events that
should be added to the event's `auth_events`.
Returns:
defer.Deferred(list[str]): List of event IDs.
"""
@defer.inlineCallbacks
def compute_auth_events(self, event, current_state_ids, for_verification=False):
if event.type == EventTypes.Create:
return defer.succeed([])
# Currently we ignore the `for_verification` flag even though there are
# some situations where we can drop particular auth events when adding
# to the event's `auth_events` (e.g. joins pointing to previous joins
# when the room is publicly joinable). Dropping event IDs has the
# advantage that the auth chain for the room grows slower, but we use
# the auth chain in state resolution v2 to order events, which means
# care must be taken if dropping events to ensure that it doesn't
# introduce undesirable "state reset" behaviour.
#
# All of which sounds a bit tricky so we don't bother for now.
return []
auth_ids = []
for etype, state_key in event_auth.auth_types_for_event(event):
auth_ev_id = current_state_ids.get((etype, state_key))
if auth_ev_id:
auth_ids.append(auth_ev_id)
return defer.succeed(auth_ids)
key = (EventTypes.PowerLevels, "")
power_level_event_id = current_state_ids.get(key)
if power_level_event_id:
auth_ids.append(power_level_event_id)
key = (EventTypes.JoinRules, "")
join_rule_event_id = current_state_ids.get(key)
key = (EventTypes.Member, event.sender)
member_event_id = current_state_ids.get(key)
key = (EventTypes.Create, "")
create_event_id = current_state_ids.get(key)
if create_event_id:
auth_ids.append(create_event_id)
if join_rule_event_id:
join_rule_event = yield self.store.get_event(join_rule_event_id)
join_rule = join_rule_event.content.get("join_rule")
is_public = join_rule == JoinRules.PUBLIC if join_rule else False
else:
is_public = False
if event.type == EventTypes.Member:
e_type = event.content["membership"]
if e_type in [Membership.JOIN, Membership.INVITE]:
if join_rule_event_id:
auth_ids.append(join_rule_event_id)
if e_type == Membership.JOIN:
if member_event_id and not is_public:
auth_ids.append(member_event_id)
else:
if member_event_id:
auth_ids.append(member_event_id)
if for_verification:
key = (EventTypes.Member, event.state_key)
existing_event_id = current_state_ids.get(key)
if existing_event_id:
auth_ids.append(existing_event_id)
if e_type == Membership.INVITE:
if "third_party_invite" in event.content:
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"],
)
third_party_invite_id = current_state_ids.get(key)
if third_party_invite_id:
auth_ids.append(third_party_invite_id)
elif member_event_id:
member_event = yield self.store.get_event(member_event_id)
if member_event.content["membership"] == Membership.JOIN:
auth_ids.append(member_event.event_id)
return auth_ids
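To make the selection logic above concrete, here is a minimal self-contained sketch (not Synapse's actual helper) of picking auth event IDs out of a current-state map, in the spirit of the simplified compute_auth_events shown in the diff. The auth_types argument stands in for event_auth.auth_types_for_event(event), and all IDs are invented.

# Sketch: select auth event IDs from a (type, state_key) -> event_id map.
def select_auth_event_ids(event_type, auth_types, current_state_ids):
    if event_type == "m.room.create":
        # The create event has no auth events.
        return []
    auth_ids = []
    for etype, state_key in auth_types:
        auth_ev_id = current_state_ids.get((etype, state_key))
        if auth_ev_id:
            auth_ids.append(auth_ev_id)
    return auth_ids

# Illustrative usage with made-up event IDs:
current_state_ids = {
    ("m.room.create", ""): "$create",
    ("m.room.power_levels", ""): "$power",
    ("m.room.member", "@alice:example.com"): "$alice_join",
}
print(
    select_auth_event_ids(
        "m.room.message",
        [
            ("m.room.create", ""),
            ("m.room.power_levels", ""),
            ("m.room.member", "@alice:example.com"),
        ],
        current_state_ids,
    )
)  # -> ['$create', '$power', '$alice_join']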
@defer.inlineCallbacks
def check_can_change_room_list(self, room_id, user):

View File

@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -95,8 +94,6 @@ class EventTypes(object):
ServerACL = "m.room.server_acl"
Pinned = "m.room.pinned_events"
Retention = "m.room.retention"
class RejectedReason(object):
AUTH_ERROR = "auth_error"
@@ -141,14 +138,3 @@ class LimitBlockingTypes(object):
MONTHLY_ACTIVE_USER = "monthly_active_user"
HS_DISABLED = "hs_disabled"
class EventContentFields(object):
"""Fields found in events' content, regardless of type."""
# Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
LABELS = "org.matrix.labels"
# Timestamp to delete the event after
# cf https://github.com/matrix-org/matrix-doc/pull/2228
SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"
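As a quick illustration of the unstable content fields listed above, here is what an event content dict carrying them might look like. The field names are taken directly from the constants; the message body and timestamp are made up.

content = {
    "msgtype": "m.text",
    "body": "weekly report",
    # Label field, EventContentFields.LABELS (MSC2326)
    "org.matrix.labels": ["#work"],
    # Deletion timestamp field, EventContentFields.SELF_DESTRUCT_AFTER (MSC2228)
    "org.matrix.self_destruct_after": 1573228800000,
}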

View File

@@ -457,7 +457,7 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
class FederationError(RuntimeError):
""" This class is used to inform remote homeservers about erroneous
""" This class is used to inform remote home servers about erroneous
PDUs they sent us.
FATAL: The remote server could not interpret the source event.

View File

@@ -1,8 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,7 +20,6 @@ from jsonschema import FormatChecker
from twisted.internet import defer
from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.types import RoomID, UserID
@@ -70,10 +66,6 @@ ROOM_EVENT_FILTER_SCHEMA = {
"contains_url": {"type": "boolean"},
"lazy_load_members": {"type": "boolean"},
"include_redundant_members": {"type": "boolean"},
# Include or exclude events with the provided labels.
# cf https://github.com/matrix-org/matrix-doc/pull/2326
"org.matrix.labels": {"type": "array", "items": {"type": "string"}},
"org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
},
}
@@ -267,9 +259,6 @@ class Filter(object):
self.contains_url = self.filter_json.get("contains_url", None)
self.labels = self.filter_json.get("org.matrix.labels", None)
self.not_labels = self.filter_json.get("org.matrix.not_labels", [])
def filters_all_types(self):
return "*" in self.not_types
@@ -293,7 +282,6 @@ class Filter(object):
room_id = None
ev_type = "m.presence"
contains_url = False
labels = []
else:
sender = event.get("sender", None)
if not sender:
@@ -312,11 +300,10 @@ class Filter(object):
content = event.get("content", {})
# check if there is a string url field in the content for filtering purposes
contains_url = isinstance(content.get("url"), text_type)
labels = content.get(EventContentFields.LABELS, [])
return self.check_fields(room_id, sender, ev_type, labels, contains_url)
return self.check_fields(room_id, sender, ev_type, contains_url)
def check_fields(self, room_id, sender, event_type, labels, contains_url):
def check_fields(self, room_id, sender, event_type, contains_url):
"""Checks whether the filter matches the given event fields.
Returns:
@@ -326,7 +313,6 @@ class Filter(object):
"rooms": lambda v: room_id == v,
"senders": lambda v: sender == v,
"types": lambda v: _matches_wildcard(event_type, v),
"labels": lambda v: v in labels,
}
for name, match_func in literal_keys.items():
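For context, a hedged example of a room event filter using the label keys from the schema above; the key names come from the diff, while the values are invented.

room_event_filter = {
    "types": ["m.room.message"],
    # Only include events carrying the "#work" label...
    "org.matrix.labels": ["#work"],
    # ...and exclude anything labelled "#noise".
    "org.matrix.not_labels": ["#noise"],
}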

View File

@@ -29,6 +29,7 @@ FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"

View File

@@ -44,8 +44,6 @@ def check_bind_error(e, address, bind_addresses):
bind_addresses (list): Addresses on which the service listens.
"""
if address == "0.0.0.0" and "::" in bind_addresses:
logger.warning(
"Failed to listen on 0.0.0.0, continuing because listening on [::]"
)
logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]")
else:
raise e

View File

@@ -237,12 +237,6 @@ def start(hs, listeners=None):
"""
Start a Synapse server or worker.
Should be called once the reactor is running and (if we're using ACME) the
TLS certificates are in place.
Will start the main HTTP listeners and do some other startup tasks, and then
notify systemd.
Args:
hs (synapse.server.HomeServer)
listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
@@ -275,7 +269,7 @@ def start(hs, listeners=None):
# It is now safe to start your Synapse.
hs.start_listening(listeners)
hs.get_datastore().db.start_profiling()
hs.get_datastore().start_profiling()
setup_sentry(hs)
setup_sdnotify(hs)
@@ -317,7 +311,9 @@ def setup_sdnotify(hs):
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
sdnotify(b"READY=1\nMAINPID=%i" % (os.getpid(),))
hs.get_reactor().addSystemEventTrigger(
"after", "startup", sdnotify, b"READY=1\nMAINPID=%i" % (os.getpid(),)
)
hs.get_reactor().addSystemEventTrigger(
"before", "shutdown", sdnotify, b"STOPPING=1"

View File

@@ -45,6 +45,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string
@@ -104,10 +105,8 @@ def export_data_command(hs, args):
user_id = args.user_id
directory = args.output_directory
res = yield defer.ensureDeferred(
hs.get_handlers().admin_handler.export_user_data(
user_id, FileExfiltrationWriter(user_id, directory=directory)
)
res = yield hs.get_handlers().admin_handler.export_user_data(
user_id, FileExfiltrationWriter(user_id, directory=directory)
)
print(res)
@@ -230,10 +229,14 @@ def start(config_options):
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = AdminCmdServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -34,6 +34,7 @@ from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -93,7 +94,7 @@ class AppserviceServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -102,7 +103,7 @@ class AppserviceServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -142,6 +143,8 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
if config.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
@@ -156,8 +159,10 @@ def start(config_options):
ps = AppserviceServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ps, config, use_worker_options=True)

View File

@@ -62,6 +62,7 @@ from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -152,7 +153,7 @@ class ClientReaderServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -161,7 +162,7 @@ class ClientReaderServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -180,10 +181,14 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = ClientReaderServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -57,6 +57,7 @@ from synapse.rest.client.v1.room import (
)
from synapse.server import HomeServer
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -146,7 +147,7 @@ class EventCreatorServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -155,7 +156,7 @@ class EventCreatorServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -179,10 +180,14 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = EventCreatorServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -46,6 +46,7 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -131,7 +132,7 @@ class FederationReaderServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -140,7 +141,7 @@ class FederationReaderServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -161,10 +162,14 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = FederationReaderServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -40,7 +40,7 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams._base import ReceiptsStream
from synapse.server import HomeServer
from synapse.storage.database import Database
from synapse.storage.engines import create_engine
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
@@ -59,8 +59,8 @@ class FederationSenderSlaveStore(
SlavedDeviceStore,
SlavedPresenceStore,
):
def __init__(self, database: Database, db_conn, hs):
super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs)
def __init__(self, db_conn, hs):
super(FederationSenderSlaveStore, self).__init__(db_conn, hs)
# We pull out the current federation stream position now so that we
# always have a known value for the federation position in memory so
@@ -69,7 +69,7 @@ class FederationSenderSlaveStore(
self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
def _get_federation_out_pos(self, db_conn):
sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
sql = "SELECT stream_id FROM federation_stream_position" " WHERE type = ?"
sql = self.database_engine.convert_param_style(sql)
txn = db_conn.cursor()
@@ -123,7 +123,7 @@ class FederationSenderServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -132,7 +132,7 @@ class FederationSenderServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -173,6 +173,8 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
if config.send_federation:
sys.stderr.write(
"\nThe send_federation must be disabled in the main synapse process"
@@ -187,8 +189,10 @@ def start(config_options):
ss = FederationSenderServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -39,6 +39,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -203,7 +204,7 @@ class FrontendProxyServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -212,7 +213,7 @@ class FrontendProxyServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -233,10 +234,14 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = FrontendProxyServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -19,13 +19,12 @@ from __future__ import print_function
import gc
import logging
import math
import os
import resource
import sys
from six import iteritems
import psutil
from prometheus_client import Gauge
from twisted.application import service
@@ -39,6 +38,7 @@ import synapse
import synapse.config.logger
from synapse import events
from synapse.api.urls import (
CONTENT_REPO_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
@@ -64,11 +64,12 @@ from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.well_known import WellKnownResource
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.storage import DataStore, are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
@@ -221,7 +222,13 @@ class SynapseHomeServer(HomeServer):
if self.get_config().enable_media_repo:
media_repo = self.get_media_repository_resource()
resources.update(
{MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
{
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
self, self.config.uploads_path
),
}
)
elif name == "media":
raise ConfigError(
@@ -275,7 +282,7 @@ class SynapseHomeServer(HomeServer):
reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -284,7 +291,23 @@ class SynapseHomeServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
def run_startup_checks(self, db_conn, database_engine):
all_users_native = are_all_users_on_domain(
db_conn.cursor(), database_engine, self.hostname
)
if not all_users_native:
quit_with_error(
"Found users in database not native to %s!\n"
"You cannot changed a synapse server_name after it's been configured"
% (self.hostname,)
)
try:
database_engine.check_database(db_conn.cursor())
except IncorrectDatabaseSetup as e:
quit_with_error(str(e))
# Gauges to expose monthly active user control metrics
@@ -310,7 +333,7 @@ def setup(config_options):
"Synapse Homeserver", config_options
)
except ConfigError as e:
sys.stderr.write("\nERROR: %s\n" % (e,))
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
if not config:
@@ -320,23 +343,40 @@ def setup(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
hs = SynapseHomeServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
logger.info("Setting up server")
logger.info("Preparing database: %s...", config.database_config["name"])
try:
hs.setup()
except IncorrectDatabaseSetup as e:
quit_with_error(str(e))
except UpgradeDatabaseException as e:
quit_with_error("Failed to upgrade database: %s" % (e,))
with hs.get_db_conn(run_new_connection=False) as db_conn:
prepare_database(db_conn, database_engine, config=config)
database_engine.on_new_connection(db_conn)
hs.run_startup_checks(db_conn, database_engine)
db_conn.commit()
except UpgradeDatabaseException:
sys.stderr.write(
"\nFailed to upgrade database.\n"
"Have you checked for version specific instructions in"
" UPGRADES.rst?\n"
)
sys.exit(1)
logger.info("Database prepared in %s.", config.database_config["name"])
hs.setup()
hs.setup_master()
@defer.inlineCallbacks
@@ -395,7 +435,7 @@ def setup(config_options):
_base.start(hs, config.listeners)
hs.get_pusherpool().start()
hs.get_datastore().db.updates.start_doing_background_updates()
hs.get_datastore().start_doing_background_updates()
except Exception:
# Print the exception and bail out.
print("Error during startup:", file=sys.stderr)
@@ -431,89 +471,6 @@ class SynapseService(service.Service):
return self._port.stopListening()
# Contains the list of processes we will be monitoring
# currently either 0 or 1
_stats_process = []
@defer.inlineCallbacks
def phone_stats_home(hs, stats, stats_process=_stats_process):
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - hs.start_time)
if uptime < 0:
uptime = 0
stats["homeserver"] = hs.config.server_name
stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
stats["python_version"] = "{}.{}.{}".format(
version.major, version.minor, version.micro
)
stats["total_users"] = yield hs.get_datastore().count_all_users()
total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users
daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
for name, count in iteritems(daily_user_type_results):
stats["daily_user_type_" + name] = count
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
r30_results = yield hs.get_datastore().count_r30_users()
for name, count in iteritems(r30_results):
stats["r30_users_" + name] = count
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = CACHE_SIZE_FACTOR
stats["event_cache_size"] = hs.config.event_cache_size
#
# Performance statistics
#
old = stats_process[0]
new = (now, resource.getrusage(resource.RUSAGE_SELF))
stats_process[0] = new
# Get RSS in bytes
stats["memory_rss"] = new[1].ru_maxrss
# Get CPU time in % of a single core, not % of all cores
used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - (
old[1].ru_utime + old[1].ru_stime
)
if used_cpu_time == 0 or new[0] == old[0]:
stats["cpu_average"] = 0
else:
stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)
#
# Database version
#
# This only reports info about the *main* database.
stats["database_engine"] = hs.get_datastore().db.engine.module.__name__
stats["database_server_version"] = hs.get_datastore().db.engine.server_version
logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
try:
yield hs.get_proxied_http_client().put_json(
hs.config.report_stats_endpoint, stats
)
except Exception as e:
logger.warning("Error reporting stats: %s", e)
def run(hs):
PROFILE_SYNAPSE = False
if PROFILE_SYNAPSE:
@@ -540,19 +497,91 @@ def run(hs):
reactor.run = profile(reactor.run)
clock = hs.get_clock()
start_time = clock.time()
stats = {}
def performance_stats_init():
_stats_process.clear()
_stats_process.append(
(int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
)
# Contains the list of processes we will be monitoring
# currently either 0 or 1
stats_process = []
def start_phone_stats_home():
return run_as_background_process(
"phone_stats_home", phone_stats_home, hs, stats
return run_as_background_process("phone_stats_home", phone_stats_home)
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - start_time)
if uptime < 0:
uptime = 0
stats["homeserver"] = hs.config.server_name
stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
stats["python_version"] = "{}.{}.{}".format(
version.major, version.minor, version.micro
)
stats["total_users"] = yield hs.get_datastore().count_all_users()
total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users
daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
for name, count in iteritems(daily_user_type_results):
stats["daily_user_type_" + name] = count
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
stats[
"daily_active_rooms"
] = yield hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
r30_results = yield hs.get_datastore().count_r30_users()
for name, count in iteritems(r30_results):
stats["r30_users_" + name] = count
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = CACHE_SIZE_FACTOR
stats["event_cache_size"] = hs.config.event_cache_size
if len(stats_process) > 0:
stats["memory_rss"] = 0
stats["cpu_average"] = 0
for process in stats_process:
stats["memory_rss"] += process.memory_info().rss
stats["cpu_average"] += int(process.cpu_percent(interval=None))
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
logger.info(
"Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
)
try:
yield hs.get_simple_http_client().put_json(
hs.config.report_stats_endpoint, stats
)
except Exception as e:
logger.warn("Error reporting stats: %s", e)
def performance_stats_init():
try:
process = psutil.Process()
# Ensure we can fetch both, and make the initial request for cpu_percent
# so the next request will use this as the initial point.
process.memory_info().rss
process.cpu_percent(interval=None)
logger.info("report_stats can use psutil")
stats_process.append(process)
except (AttributeError):
logger.warning("Unable to read memory/cpu stats. Disabling reporting.")
def generate_user_daily_visit_stats():
return run_as_background_process(

View File

@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.api.urls import LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
from synapse.api.urls import CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
@@ -37,8 +37,10 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -81,6 +83,9 @@ class MediaRepositoryServer(HomeServer):
{
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
self, self.config.uploads_path
),
"/_synapse/admin": admin_resource,
}
)
@@ -115,7 +120,7 @@ class MediaRepositoryServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -124,7 +129,7 @@ class MediaRepositoryServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -152,10 +157,14 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = MediaRepositoryServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -33,10 +33,10 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
@@ -45,11 +45,7 @@ logger = logging.getLogger("synapse.app.pusher")
class PusherSlaveStore(
SlavedEventStore,
SlavedPusherStore,
SlavedReceiptsStore,
SlavedAccountDataStore,
RoomStore,
SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore, SlavedAccountDataStore
):
update_pusher_last_stream_ordering_and_success = __func__(
DataStore.update_pusher_last_stream_ordering_and_success
@@ -118,7 +114,7 @@ class PusherServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -127,7 +123,7 @@ class PusherServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -202,10 +198,14 @@ def start(config_options):
# Force the pushers to start since they will be disabled in the main config
config.start_pushers = True
database_engine = create_engine(config.database_config)
ps = PusherServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ps, config, use_worker_options=True)

View File

@@ -48,13 +48,14 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow
from synapse.replication.tcp.streams.events import EventsStreamEventRow
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.data_stores.main.presence import UserPresenceState
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
@@ -150,7 +151,7 @@ class SynchrotronPresence(object):
def set_state(self, user, state, ignore_status_msg=False):
# TODO How's this supposed to work?
return defer.succeed(None)
pass
get_states = __func__(PresenceHandler.get_states)
get_state = __func__(PresenceHandler.get_state)
@@ -325,7 +326,7 @@ class SynchrotronServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -334,7 +335,7 @@ class SynchrotronServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -371,7 +372,8 @@ class SyncReplicationHandler(ReplicationClientHandler):
def get_currently_syncing_users(self):
return self.presence_handler.get_currently_syncing_users()
async def process_and_notify(self, stream_name, token, rows):
@defer.inlineCallbacks
def process_and_notify(self, stream_name, token, rows):
try:
if stream_name == "events":
# We shouldn't get multiple rows per token for events stream, so
@@ -379,14 +381,7 @@ class SyncReplicationHandler(ReplicationClientHandler):
for row in rows:
if row.type != EventsStreamEventRow.TypeId:
continue
assert isinstance(row, EventsStreamRow)
event = await self.store.get_event(
row.data.event_id, allow_rejected=True
)
if event.rejected_reason:
continue
event = yield self.store.get_event(row.data.event_id)
extra_users = ()
if event.type == EventTypes.Member:
extra_users = (event.state_key,)
@@ -418,11 +413,11 @@ class SyncReplicationHandler(ReplicationClientHandler):
elif stream_name == "device_lists":
all_room_ids = set()
for row in rows:
room_ids = await self.store.get_rooms_for_user(row.user_id)
room_ids = yield self.store.get_rooms_for_user(row.user_id)
all_room_ids.update(room_ids)
self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
elif stream_name == "presence":
await self.presence_handler.process_replication_rows(token, rows)
yield self.presence_handler.process_replication_rows(token, rows)
elif stream_name == "receipts":
self.notifier.on_new_event(
"groups_key", token, users=[row.user_id for row in rows]
@@ -442,10 +437,14 @@ def start(config_options):
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = SynchrotronServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
application_service_handler=SynchrotronApplicationService(),
)

View File

@@ -43,7 +43,7 @@ from synapse.replication.tcp.streams.events import (
from synapse.rest.client.v2_alpha import user_directory
from synapse.server import HomeServer
from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
from synapse.storage.database import Database
from synapse.storage.engines import create_engine
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
@@ -60,11 +60,11 @@ class UserDirectorySlaveStore(
UserDirectoryStore,
BaseSlavedStore,
):
def __init__(self, database: Database, db_conn, hs):
super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs)
def __init__(self, db_conn, hs):
super(UserDirectorySlaveStore, self).__init__(db_conn, hs)
events_max = self._stream_id_gen.get_current_token()
curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
db_conn,
"current_state_delta_stream",
entity_column="room_id",
@@ -150,7 +150,7 @@ class UserDirectoryServer(HomeServer):
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
logger.warn(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
@@ -159,7 +159,7 @@ class UserDirectoryServer(HomeServer):
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
logger.warn("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
@@ -199,6 +199,8 @@ def start(config_options):
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
if config.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
@@ -213,8 +215,10 @@ def start(config_options):
ss = UserDirectoryServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)

View File

@@ -94,9 +94,7 @@ class ApplicationService(object):
ip_range_whitelist=None,
):
self.token = token
self.url = (
url.rstrip("/") if isinstance(url, str) else None
) # url must not end with a slash
self.url = url
self.hs_token = hs_token
self.sender = sender
self.server_name = hostname

View File

@@ -185,7 +185,7 @@ class ApplicationServiceApi(SimpleHttpClient):
if not _is_valid_3pe_metadata(info):
logger.warning(
"query_3pe_protocol to %s did not return a valid result", uri
"query_3pe_protocol to %s did not return a" " valid result", uri
)
return None

View File

@@ -134,7 +134,7 @@ def _load_appservice(hostname, as_info, config_filename):
for regex_obj in as_info["namespaces"][ns]:
if not isinstance(regex_obj, dict):
raise ValueError(
"Expected namespace entry in %s to be an object, but got %s",
"Expected namespace entry in %s to be an object," " but got %s",
ns,
regex_obj,
)

View File

@@ -35,11 +35,11 @@ class CaptchaConfig(Config):
## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.
# This homeserver's ReCAPTCHA public key.
# This Home Server's ReCAPTCHA public key.
#
#recaptcha_public_key: "YOUR_PUBLIC_KEY"
# This homeserver's ReCAPTCHA private key.
# This Home Server's ReCAPTCHA private key.
#
#recaptcha_private_key: "YOUR_PRIVATE_KEY"

View File

@@ -12,45 +12,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from textwrap import indent
import yaml
from synapse.config._base import Config, ConfigError
logger = logging.getLogger(__name__)
class DatabaseConnectionConfig:
"""Contains the connection config for a particular database.
Args:
name: A label for the database, used for logging.
db_config: The config for a particular database, as per `database`
section of main config. Has three fields: `name` for database
module name, `args` for the args to give to the database
connector, and optional `data_stores` that is a list of stores to
provision on this database (defaulting to all).
"""
def __init__(self, name: str, db_config: dict):
if db_config["name"] not in ("sqlite3", "psycopg2"):
raise ConfigError("Unsupported database type %r" % (db_config["name"],))
if db_config["name"] == "sqlite3":
db_config.setdefault("args", {}).update(
{"cp_min": 1, "cp_max": 1, "check_same_thread": False}
)
data_stores = db_config.get("data_stores")
if data_stores is None:
data_stores = ["main", "state"]
self.name = name
self.config = db_config
self.data_stores = data_stores
from ._base import Config
class DatabaseConfig(Config):
@@ -59,43 +26,22 @@ class DatabaseConfig(Config):
def read_config(self, config, **kwargs):
self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K"))
# We *experimentally* support specifying multiple databases via the
# `databases` key. This is a map from a label to database config in the
# same format as the `database` config option, plus an extra
# `data_stores` key to specify which data store goes where. For example:
#
# databases:
# master:
# name: psycopg2
# data_stores: ["main"]
# args: {}
# state:
# name: psycopg2
# data_stores: ["state"]
# args: {}
self.database_config = config.get("database")
multi_database_config = config.get("databases")
database_config = config.get("database")
if multi_database_config and database_config:
raise ConfigError("Can't specify both 'database' and 'datbases' in config")
if multi_database_config:
if config.get("database_path"):
raise ConfigError("Can't specify 'database_path' with 'databases'")
self.databases = [
DatabaseConnectionConfig(name, db_conf)
for name, db_conf in multi_database_config.items()
]
if self.database_config is None:
self.database_config = {"name": "sqlite3", "args": {}}
name = self.database_config.get("name", None)
if name == "psycopg2":
pass
elif name == "sqlite3":
self.database_config.setdefault("args", {}).update(
{"cp_min": 1, "cp_max": 1, "check_same_thread": False}
)
else:
if database_config is None:
database_config = {"name": "sqlite3", "args": {}}
raise RuntimeError("Unsupported database type '%s'" % (name,))
self.databases = [DatabaseConnectionConfig("master", database_config)]
self.set_databasepath(config.get("database_path"))
self.set_databasepath(config.get("database_path"))
def generate_config_section(self, data_dir_path, database_conf, **kwargs):
if not database_conf:
@@ -130,24 +76,11 @@ class DatabaseConfig(Config):
self.set_databasepath(args.database_path)
def set_databasepath(self, database_path):
if database_path is None:
return
if database_path != ":memory:":
database_path = self.abspath(database_path)
# We only support setting a database path if we have a single sqlite3
# database.
if len(self.databases) != 1:
raise ConfigError("Cannot specify 'database_path' with multiple databases")
database = self.get_single_database()
if database.config["name"] != "sqlite3":
# We don't raise here as we haven't done so before for this case.
logger.warn("Ignoring 'database_path' for non-sqlite3 database")
return
database.config["args"]["database"] = database_path
if self.database_config.get("name", None) == "sqlite3":
if database_path is not None:
self.database_config["args"]["database"] = database_path
@staticmethod
def add_arguments(parser):
@@ -158,11 +91,3 @@ class DatabaseConfig(Config):
metavar="SQLITE_DATABASE_PATH",
help="The path to a sqlite database to use.",
)
def get_single_database(self) -> DatabaseConnectionConfig:
"""Returns the database if there is only one, useful for e.g. tests
"""
if len(self.databases) != 1:
raise Exception("More than one database exists")
return self.databases[0]

View File

@@ -21,7 +21,6 @@ from __future__ import print_function
import email.utils
import os
from enum import Enum
from typing import Optional
import pkg_resources
@@ -102,7 +101,7 @@ class EmailConfig(Config):
# both in RegistrationConfig and here. We should factor this bit out
self.account_threepid_delegate_email = self.trusted_third_party_id_servers[
0
] # type: Optional[str]
]
self.using_identity_server_from_trusted_list = True
else:
raise ConfigError(
@@ -147,8 +146,6 @@ class EmailConfig(Config):
if k not in email_config:
missing.append("email." + k)
# public_baseurl is required to build password reset and validation links that
# will be emailed to users
if config.get("public_baseurl") is None:
missing.append("public_baseurl")
@@ -308,23 +305,8 @@ class EmailConfig(Config):
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: false
#
# # notif_from defines the "From" address to use when sending emails.
# # It must be set if email sending is enabled.
# #
# # The placeholder '%(app)s' will be replaced by the application name,
# # which is normally 'app_name' (below), but may be overridden by the
# # Matrix client application.
# #
# # Note that the placeholder must be written '%(app)s', including the
# # trailing 's'.
# #
# notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
#
# # app_name defines the default value for '%(app)s' in notif_from. It
# # defaults to 'Matrix'.
# #
# #app_name: my_branded_matrix_server
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
#
# # Enable email notifications by default
# #
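The '%(app)s' placeholder described in the comments above uses the same form as Python's %-style dict formatting; a quick illustration of how such a substitution behaves (purely illustrative, not Synapse's mailer code, and the substituted value is invented):

notif_from = "Your Friendly %(app)s homeserver <noreply@example.com>"
print(notif_from % {"app": "Matrix"})
# -> Your Friendly Matrix homeserver <noreply@example.com>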

View File

@@ -108,7 +108,7 @@ class KeyConfig(Config):
self.signing_key = self.read_signing_keys(signing_key_path, "signing_key")
self.old_signing_keys = self.read_old_signing_keys(
config.get("old_signing_keys")
config.get("old_signing_keys", {})
)
self.key_refresh_interval = self.parse_duration(
config.get("key_refresh_interval", "1d")
@@ -125,7 +125,7 @@ class KeyConfig(Config):
# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
key_servers = [{"server_name": "matrix.org"}]
else:
key_servers = config.get("trusted_key_servers", [])
@@ -156,7 +156,7 @@ class KeyConfig(Config):
if not self.macaroon_secret_key:
# Unfortunately, there are people out there that don't have this
# set. Lets just be "nice" and derive one from their secret key.
logger.warning("Config is missing macaroon_secret_key")
logger.warn("Config is missing macaroon_secret_key")
seed = bytes(self.signing_key[0])
self.macaroon_secret_key = hashlib.sha256(seed).digest()
@@ -199,19 +199,14 @@ class KeyConfig(Config):
signing_key_path: "%(base_key_name)s.signing.key"
# The keys that the server used to sign messages with but won't use
# to sign new messages.
# to sign new messages. E.g. it has lost its private key
#
old_signing_keys:
# For each key, `key` should be the base64-encoded public key, and
# `expired_ts`should be the time (in milliseconds since the unix epoch) that
# it was last used.
#
# It is possible to build an entry from an old signing.key file using the
# `export_signing_key` script which is provided with synapse.
#
# For example:
#
#"ed25519:id": { key: "base64string", expired_ts: 123456789123 }
#old_signing_keys:
# "ed25519:auto":
# # Base64 encoded public key
# key: "The public part of your old signing key."
# # Millisecond POSIX timestamp when the key expired.
# expired_ts: 123456789123
# How long key response published by this server is valid for.
# Used to set the valid_until_ts in /key/v2 APIs.
@@ -295,8 +290,6 @@ class KeyConfig(Config):
raise ConfigError("Error reading %s: %s" % (name, str(e)))
def read_old_signing_keys(self, old_signing_keys):
if old_signing_keys is None:
return {}
keys = {}
for key_id, key_data in old_signing_keys.items():
if is_signing_algorithm_supported(key_id):

View File

@@ -12,7 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import logging.config
import os
@@ -37,17 +37,10 @@ from synapse.logging._structured import (
from synapse.logging.context import LoggingContextFilter
from synapse.util.versionstring import get_version_string
from ._base import Config, ConfigError
from ._base import Config
DEFAULT_LOG_CONFIG = Template(
"""\
# Log configuration for Synapse.
#
# This is a YAML file containing a standard Python logging configuration
# dictionary. See [1] for details on the valid settings.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
"""
version: 1
formatters:
@@ -88,18 +81,11 @@ disable_existing_loggers: false
"""
)
LOG_FILE_ERROR = """\
Support for the log_file configuration option and --log-file command-line option was
removed in Synapse 1.3.0. You should instead set up a separate log configuration file.
"""
class LoggingConfig(Config):
section = "logging"
def read_config(self, config, **kwargs):
if config.get("log_file"):
raise ConfigError(LOG_FILE_ERROR)
self.log_config = self.abspath(config.get("log_config"))
self.no_redirect_stdio = config.get("no_redirect_stdio", False)
@@ -120,8 +106,6 @@ class LoggingConfig(Config):
def read_arguments(self, args):
if args.no_redirect_stdio is not None:
self.no_redirect_stdio = args.no_redirect_stdio
if args.log_file is not None:
raise ConfigError(LOG_FILE_ERROR)
@staticmethod
def add_arguments(parser):
@@ -134,10 +118,6 @@ class LoggingConfig(Config):
help="Do not redirect stdout/stderr to the log",
)
logging_group.add_argument(
"-f", "--log-file", dest="log_file", help=argparse.SUPPRESS,
)
def generate_files(self, config, config_dir_path):
log_config = config.get("log_config")
if log_config and not os.path.exists(log_config):
@@ -202,7 +182,7 @@ def _reload_stdlib_logging(*args, log_config=None):
logger = logging.getLogger("")
if not log_config:
logger.warning("Reloaded a blank config?")
logger.warn("Reloaded a blank config?")
logging.config.dictConfig(log_config)
@@ -254,8 +234,8 @@ def setup_logging(
# make sure that the first thing we log is a thing we can grep backwards
# for
logging.warning("***** STARTING SERVER *****")
logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
logging.warn("***** STARTING SERVER *****")
logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
logging.info("Server hostname: %s", config.server_name)
return logger

View File

@@ -83,9 +83,10 @@ class RatelimitConfig(Config):
)
rc_admin_redaction = config.get("rc_admin_redaction")
self.rc_admin_redaction = None
if rc_admin_redaction:
self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
else:
self.rc_admin_redaction = None
def generate_config_section(self, **kwargs):
return """\

View File

@@ -106,13 +106,6 @@ class RegistrationConfig(Config):
account_threepid_delegates = config.get("account_threepid_delegates") or {}
self.account_threepid_delegate_email = account_threepid_delegates.get("email")
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
if self.account_threepid_delegate_msisdn and not self.public_baseurl:
raise ConfigError(
"The configuration option `public_baseurl` is required if "
"`account_threepid_delegate.msisdn` is set, such that "
"clients know where to submit validation tokens to. Please "
"configure `public_baseurl`."
)
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@@ -307,7 +300,7 @@ class RegistrationConfig(Config):
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
account_threepid_delegates:
#email: https://example.com # Delegate email sending to example.com
#email: https://example.com # Delegate email sending to example.org
#msisdn: http://localhost:8090 # Delegate SMS sending to this local process
# Users who register on this homeserver will automatically be joined

View File

@@ -156,6 +156,7 @@ class ContentRepositoryConfig(Config):
(provider_class, parsed_config, wrapper_config)
)
self.uploads_path = self.ensure_directory(config.get("uploads_path", "uploads"))
self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
self.thumbnail_requirements = parse_thumbnail_requirements(
config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES)
@@ -230,6 +231,10 @@ class ContentRepositoryConfig(Config):
# config:
# directory: /mnt/some/other/directory
# Directory where in-progress uploads are stored.
#
uploads_path: "%(uploads_path)s"
# The largest allowed upload size in bytes
#
#max_upload_size: 10M

View File

@@ -170,7 +170,7 @@ class _RoomDirectoryRule(object):
self.action = action
else:
raise ConfigError(
"%s rules can only have action of 'allow' or 'deny'" % (option_name,)
"%s rules can only have action of 'allow'" " or 'deny'" % (option_name,)
)
self._alias_matches_all = alias == "*"

View File

@@ -14,19 +14,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module, load_python_module
from synapse.types import (
map_username_to_mxid_localpart,
mxid_localpart_allowed_characters,
)
from synapse.util.module_loader import load_python_module
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
DEFAULT_USER_MAPPING_PROVIDER = (
"synapse.handlers.saml_handler.DefaultSamlMappingProvider"
)
def _dict_merge(merge_dict, into_dict):
"""Do a deep merge of two dicts
@@ -77,69 +75,15 @@ class SAML2Config(Config):
self.saml2_enabled = True
self.saml2_mxid_source_attribute = saml2_config.get(
"mxid_source_attribute", "uid"
)
self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
"grandfathered_mxid_source_attribute", "uid"
)
# user_mapping_provider may be None if the key is present but has no value
ump_dict = saml2_config.get("user_mapping_provider") or {}
# Use the default user mapping provider if not set
ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
# Ensure a config is present
ump_dict["config"] = ump_dict.get("config") or {}
if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER:
# Load deprecated options for use by the default module
old_mxid_source_attribute = saml2_config.get("mxid_source_attribute")
if old_mxid_source_attribute:
logger.warning(
"The config option saml2_config.mxid_source_attribute is deprecated. "
"Please use saml2_config.user_mapping_provider.config"
".mxid_source_attribute instead."
)
ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute
old_mxid_mapping = saml2_config.get("mxid_mapping")
if old_mxid_mapping:
logger.warning(
"The config option saml2_config.mxid_mapping is deprecated. Please "
"use saml2_config.user_mapping_provider.config.mxid_mapping instead."
)
ump_dict["config"]["mxid_mapping"] = old_mxid_mapping
# Retrieve an instance of the module's class
# Pass the config dictionary to the module for processing
(
self.saml2_user_mapping_provider_class,
self.saml2_user_mapping_provider_config,
) = load_module(ump_dict)
# Ensure loaded user mapping module has defined all necessary methods
# Note parse_config() is already checked during the call to load_module
required_methods = [
"get_saml_attributes",
"saml_response_to_user_attributes",
]
missing_methods = [
method
for method in required_methods
if not hasattr(self.saml2_user_mapping_provider_class, method)
]
if missing_methods:
raise ConfigError(
"Class specified by saml2_config."
"user_mapping_provider.module is missing required "
"methods: %s" % (", ".join(missing_methods),)
)
# Get the desired saml auth response attributes from the module
saml2_config_dict = self._default_saml_config_dict(
*self.saml2_user_mapping_provider_class.get_saml_attributes(
self.saml2_user_mapping_provider_config
)
)
saml2_config_dict = self._default_saml_config_dict()
_dict_merge(
merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict
)
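Loosely sketched below is what a custom user mapping provider satisfying the checks above might look like. The method names come from required_methods and the parse_config check in the diff; the exact signatures and return shape are assumptions for illustration only.

class ExampleSamlMappingProvider:
    def __init__(self, parsed_config):
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        # Validate/normalise the user_mapping_provider.config dict; the returned
        # value being passed to __init__ is an assumption in this sketch.
        return config

    @staticmethod
    def get_saml_attributes(config):
        # (required attributes, optional attributes) used to build the SP config.
        return {"uid"}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures):
        # Derive user attributes from the SAML response; argument list and
        # return shape here are illustrative assumptions.
        uid = saml_response.ava["uid"][0]
        return {"mxid_localpart": uid.lower(), "displayname": None}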
@@ -159,27 +103,22 @@ class SAML2Config(Config):
saml2_config.get("saml_session_lifetime", "5m")
)
def _default_saml_config_dict(
self, required_attributes: set, optional_attributes: set
):
"""Generate a configuration dictionary with required and optional attributes that
will be needed to process new user registration
mapping = saml2_config.get("mxid_mapping", "hexencode")
try:
self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping]
except KeyError:
raise ConfigError("%s is not a known mxid_mapping" % (mapping,))
Args:
required_attributes: SAML auth response attributes that are
necessary to function
optional_attributes: SAML auth response attributes that can be used to add
additional information to Synapse user accounts, but are not required
Returns:
dict: A SAML configuration dictionary
"""
def _default_saml_config_dict(self):
import saml2
public_baseurl = self.public_baseurl
if public_baseurl is None:
raise ConfigError("saml2_config requires a public_baseurl to be set")
required_attributes = {"uid", self.saml2_mxid_source_attribute}
optional_attributes = {"displayName"}
if self.saml2_grandfathered_mxid_source_attribute:
optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
optional_attributes -= required_attributes
@@ -268,58 +207,33 @@ class SAML2Config(Config):
#
#config_path: "%(config_dir_path)s/sp_conf.py"
# The lifetime of a SAML session. This defines how long a user has to
# the lifetime of a SAML session. This defines how long a user has to
# complete the authentication process, if allow_unsolicited is unset.
# The default is 5 minutes.
#
#saml_session_lifetime: 5m
# An external module can be provided here as a custom solution to
# mapping attributes returned from a saml provider onto a matrix user.
# The SAML attribute (after mapping via the attribute maps) to use to derive
# the Matrix ID from. 'uid' by default.
#
user_mapping_provider:
# The custom module's class. Uncomment to use a custom module.
#
#module: mapping_provider.SamlMappingProvider
#mxid_source_attribute: displayName
# Custom configuration values for the module. Below options are
# intended for the built-in provider, they should be changed if
# using a custom module. This section will be passed as a Python
# dictionary to the module's `parse_config` method.
#
config:
# The SAML attribute (after mapping via the attribute maps) to use
# to derive the Matrix ID from. 'uid' by default.
#
# Note: This used to be configured by the
# saml2_config.mxid_source_attribute option. If that is still
# defined, its value will be used instead.
#
#mxid_source_attribute: displayName
# The mapping system to use for mapping the saml attribute onto a matrix ID.
# Options include:
# * 'hexencode' (which maps unpermitted characters to '=xx')
# * 'dotreplace' (which replaces unpermitted characters with '.').
# The default is 'hexencode'.
#
#mxid_mapping: dotreplace
# The mapping system to use for mapping the saml attribute onto a
# matrix ID.
#
# Options include:
# * 'hexencode' (which maps unpermitted characters to '=xx')
# * 'dotreplace' (which replaces unpermitted characters with
# '.').
# The default is 'hexencode'.
#
# Note: This used to be configured by the
# saml2_config.mxid_mapping option. If that is still defined, its
# value will be used instead.
#
#mxid_mapping: dotreplace
# In previous versions of synapse, the mapping from SAML attribute to
# MXID was always calculated dynamically rather than stored in a
# table. For backwards- compatibility, we will look for user_ids
# matching such a pattern before creating a new account.
# In previous versions of synapse, the mapping from SAML attribute to MXID was
# always calculated dynamically rather than stored in a table. For backwards-
# compatibility, we will look for user_ids matching such a pattern before
# creating a new account.
#
# This setting controls the SAML attribute which will be used for this
# backwards-compatibility lookup. Typically it should be 'uid', but if
# the attribute maps are changed, it may be necessary to change it.
# backwards-compatibility lookup. Typically it should be 'uid', but if the
# attribute maps are changed, it may be necessary to change it.
#
# The default is 'uid'.
#
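To make the `user_mapping_provider` section above concrete, here is a minimal sketch of the shape a custom module needs, based on the required methods checked earlier in this file (`get_saml_attributes`, `saml_response_to_user_attributes`, plus `parse_config` which is validated by `load_module`). The class name, the `__init__` arguments and the exact arguments of `saml_response_to_user_attributes` are illustrative assumptions, not part of this diff.

class ExampleSamlMappingProvider:
    def __init__(self, parsed_config):
        # Constructor arguments are assumed for this sketch.
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        # Validate/shape the "config" dict from user_mapping_provider.config.
        return config

    @staticmethod
    def get_saml_attributes(config):
        # Must return (required, optional) SAML attributes; these feed
        # _default_saml_config_dict() above.
        return {"uid"}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures):
        # Map the SAML assertion onto Matrix user attributes (argument and
        # return shapes are assumptions here).
        uid = saml_response.ava["uid"][0]
        return {"mxid_localpart": uid.lower(), "displayname": None}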
@@ -327,3 +241,23 @@ class SAML2Config(Config):
""" % {
"config_dir_path": config_dir_path
}
DOT_REPLACE_PATTERN = re.compile(
("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
)
def dot_replace_for_mxid(username: str) -> str:
username = username.lower()
username = DOT_REPLACE_PATTERN.sub(".", username)
# regular mxids aren't allowed to start with an underscore either
username = re.sub("^_", "", username)
return username
MXID_MAPPER_MAP = {
"hexencode": map_username_to_mxid_localpart,
"dotreplace": dot_replace_for_mxid,
}
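A small self-contained sketch of how the two mapping options behave, re-implementing the dotreplace rule shown above and approximating hexencode from the "=xx" description in the sample config (the allowed-character set below is an assumption; the real one is synapse.types.mxid_localpart_allowed_characters):

import re

ALLOWED = "abcdefghijklmnopqrstuvwxyz0123456789._=-/"
DISALLOWED = re.compile("[^%s]" % re.escape(ALLOWED))

def dotreplace(username):
    username = DISALLOWED.sub(".", username.lower())
    return re.sub("^_", "", username)  # localparts may not start with "_"

def hexencode(username):
    # Approximation: unpermitted characters become "=xx" (hex of the byte).
    return "".join(
        ch if not DISALLOWED.match(ch) else "=%02x" % ord(ch)
        for ch in username.lower()
    )

print(dotreplace("Alice Smith"))  # alice.smith
print(hexencode("Alice Smith"))   # alice=20smith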

View File

@@ -19,7 +19,7 @@ import logging
import os.path
import re
from textwrap import indent
from typing import Dict, List, Optional
from typing import List
import attr
import yaml
@@ -41,7 +41,7 @@ logger = logging.Logger(__name__)
# in the list.
DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
DEFAULT_ROOM_VERSION = "5"
DEFAULT_ROOM_VERSION = "4"
ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
@@ -102,12 +102,6 @@ class ServerConfig(Config):
"require_auth_for_profile_requests", False
)
# Whether to require sharing a room with a user to retrieve their
# profile data
self.limit_profile_requests_to_users_who_share_rooms = config.get(
"limit_profile_requests_to_users_who_share_rooms", False,
)
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
@@ -124,16 +118,15 @@ class ServerConfig(Config):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
# If set to 'False', requires authentication to access the server's public
# rooms directory through the client API. Defaults to 'True'.
self.allow_public_rooms_without_auth = config.get(
"allow_public_rooms_without_auth", False
"allow_public_rooms_without_auth", True
)
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
# If set to 'False', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'True'.
self.allow_public_rooms_over_federation = config.get(
"allow_public_rooms_over_federation", False
"allow_public_rooms_over_federation", True
)
default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
@@ -206,7 +199,7 @@ class ServerConfig(Config):
self.admin_contact = config.get("admin_contact", None)
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None # type: Optional[dict]
self.federation_domain_whitelist = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
if federation_domain_whitelist is not None:
@@ -230,7 +223,7 @@ class ServerConfig(Config):
self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
except Exception as e:
raise ConfigError(
"Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
"Invalid range(s) provided in " "federation_ip_range_blacklist: %s" % e
)
if self.public_baseurl is not None:
@@ -253,124 +246,6 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
retention_config = config.get("retention")
if retention_config is None:
retention_config = {}
self.retention_enabled = retention_config.get("enabled", False)
retention_default_policy = retention_config.get("default_policy")
if retention_default_policy is not None:
self.retention_default_min_lifetime = retention_default_policy.get(
"min_lifetime"
)
if self.retention_default_min_lifetime is not None:
self.retention_default_min_lifetime = self.parse_duration(
self.retention_default_min_lifetime
)
self.retention_default_max_lifetime = retention_default_policy.get(
"max_lifetime"
)
if self.retention_default_max_lifetime is not None:
self.retention_default_max_lifetime = self.parse_duration(
self.retention_default_max_lifetime
)
if (
self.retention_default_min_lifetime is not None
and self.retention_default_max_lifetime is not None
and (
self.retention_default_min_lifetime
> self.retention_default_max_lifetime
)
):
raise ConfigError(
"The default retention policy's 'min_lifetime' can not be greater"
" than its 'max_lifetime'"
)
else:
self.retention_default_min_lifetime = None
self.retention_default_max_lifetime = None
self.retention_allowed_lifetime_min = retention_config.get(
"allowed_lifetime_min"
)
if self.retention_allowed_lifetime_min is not None:
self.retention_allowed_lifetime_min = self.parse_duration(
self.retention_allowed_lifetime_min
)
self.retention_allowed_lifetime_max = retention_config.get(
"allowed_lifetime_max"
)
if self.retention_allowed_lifetime_max is not None:
self.retention_allowed_lifetime_max = self.parse_duration(
self.retention_allowed_lifetime_max
)
if (
self.retention_allowed_lifetime_min is not None
and self.retention_allowed_lifetime_max is not None
and self.retention_allowed_lifetime_min
> self.retention_allowed_lifetime_max
):
raise ConfigError(
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]]
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
if interval_config is None:
raise ConfigError(
"A retention policy's purge jobs configuration must have the"
" 'interval' key set."
)
interval = self.parse_duration(interval_config)
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
if shortest_max_lifetime is not None:
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
if longest_max_lifetime is not None:
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
if (
shortest_max_lifetime is not None
and longest_max_lifetime is not None
and shortest_max_lifetime > longest_max_lifetime
):
raise ConfigError(
"A retention policy's purge jobs configuration's"
" 'shortest_max_lifetime' value can not be greater than its"
" 'longest_max_lifetime' value."
)
self.retention_purge_jobs.append(
{
"interval": interval,
"shortest_max_lifetime": shortest_max_lifetime,
"longest_max_lifetime": longest_max_lifetime,
}
)
if not self.retention_purge_jobs:
self.retention_purge_jobs = [
{
"interval": self.parse_duration("1d"),
"shortest_max_lifetime": None,
"longest_max_lifetime": None,
}
]
self.listeners = [] # type: List[dict]
for listener in config.get("listeners", []):
if not isinstance(listener.get("port", None), int):
@@ -497,8 +372,6 @@ class ServerConfig(Config):
"cleanup_extremities_with_dummy_events", True
)
self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
def has_tls_listener(self) -> bool:
return any(l["tls"] for l in self.listeners)
@@ -627,23 +500,15 @@ class ServerConfig(Config):
#
#require_auth_for_profile_requests: true
# Uncomment to require a user to share a room with another user in order
# to retrieve their profile information. Only checked on Client-Server
# requests. Profile requests from other servers should be checked by the
# requesting server. Defaults to 'false'.
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
#
#limit_profile_requests_to_users_who_share_rooms: true
#allow_public_rooms_without_auth: false
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
# If set to 'false', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'true'.
#
#allow_public_rooms_without_auth: true
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
#
#allow_public_rooms_over_federation: true
#allow_public_rooms_over_federation: false
# The default room version for newly created rooms.
#
@@ -856,7 +721,7 @@ class ServerConfig(Config):
# Used by phonehome stats to group together related servers.
#server_context: context
# Resource-constrained homeserver Settings
# Resource-constrained Homeserver Settings
#
# If limit_remote_rooms.enabled is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
@@ -896,69 +761,6 @@ class ServerConfig(Config):
# Defaults to `28d`. Set to `null` to disable clearing out of old rows.
#
#user_ips_max_age: 14d
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
# The message retention policies feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true
# Default retention policy. If set, Synapse will apply it to rooms that lack the
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
# matter much because Synapse doesn't take it into account yet.
#
#default_policy:
# min_lifetime: 1d
# max_lifetime: 1y
# Retention policy limits. If set, a user won't be able to send a
# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
# that's not within this range. This is especially useful in closed federations,
# in which server admins can make sure every federating server applies the same
# rules.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
# Server admins can define the settings of the background jobs purging the
# events which lifetime has expired under the 'purge_jobs' section.
#
# If no configuration is provided, a single job will be set up to delete expired
# events in every room daily.
#
# Each job's configuration defines which range of message lifetimes the job
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
# lower than or equal to 3 days. Both the minimum and the maximum value of a
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
# which 'max_lifetime' is lower than or equal to three days.
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
# that purge to be performed by a job that's iterating over every room it knows,
# which would be quite heavy on the server.
#
#purge_jobs:
# - shortest_max_lifetime: 1d
# longest_max_lifetime: 3d
# interval: 5m
# - shortest_max_lifetime: 3d
# longest_max_lifetime: 1y
# interval: 24h
"""
% locals()
)
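The purge-job comments above describe each job as covering rooms whose 'max_lifetime' falls in a (shortest_max_lifetime, longest_max_lifetime] range, with either bound optional. A minimal sketch of that range check (values in milliseconds, mirroring parse_duration output):

def job_covers(job, room_max_lifetime_ms):
    # A job handles a room if the room's max_lifetime is strictly greater
    # than shortest_max_lifetime and at most longest_max_lifetime; a None
    # bound means "unbounded" on that side.
    lower = job.get("shortest_max_lifetime")
    upper = job.get("longest_max_lifetime")
    if lower is not None and room_max_lifetime_ms <= lower:
        return False
    if upper is not None and room_max_lifetime_ms > upper:
        return False
    return True

DAY = 24 * 60 * 60 * 1000
jobs = [
    {"shortest_max_lifetime": 1 * DAY, "longest_max_lifetime": 3 * DAY},
    {"shortest_max_lifetime": 3 * DAY, "longest_max_lifetime": 365 * DAY},
]
print([job_covers(j, 2 * DAY) for j in jobs])  # [True, False]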
@@ -979,20 +781,20 @@ class ServerConfig(Config):
"--daemonize",
action="store_true",
default=None,
help="Daemonize the homeserver",
help="Daemonize the home server",
)
server_group.add_argument(
"--print-pidfile",
action="store_true",
default=None,
help="Print the path to the pidfile just before daemonizing",
help="Print the path to the pidfile just" " before daemonizing",
)
server_group.add_argument(
"--manhole",
metavar="PORT",
dest="manhole",
type=int,
help="Turn on the twisted telnet manhole service on the given port.",
help="Turn on the twisted telnet manhole" " service on the given port.",
)

View File

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import hashlib
import logging
@@ -40,11 +40,8 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.
hashes = event.get("hashes")
# nb it might be a frozendict or a dict
if not isinstance(hashes, collections.abc.Mapping):
raise SynapseError(
400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED
)
if not isinstance(hashes, dict):
raise SynapseError(400, "Malformed 'hashes'", Codes.UNAUTHORIZED)
if name not in hashes:
raise SynapseError(
@@ -128,11 +125,9 @@ def compute_event_signature(event_dict, signature_name, signing_key):
redact_json = prune_event_dict(event_dict)
redact_json.pop("age_ts", None)
redact_json.pop("unsigned", None)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Signing event: %s", encode_canonical_json(redact_json))
logger.debug("Signing event: %s", encode_canonical_json(redact_json))
redact_json = sign_json(redact_json, signature_name, signing_key)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Signed event: %s", encode_canonical_json(redact_json))
logger.debug("Signed event: %s", encode_canonical_json(redact_json))
return redact_json["signatures"]
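The `isEnabledFor(logging.DEBUG)` guard above exists because the canonical-JSON encoding is expensive and %-style lazy formatting only defers string interpolation, not the encoding call itself. A standalone sketch of the same pattern (using json.dumps as a stand-in for encode_canonical_json):

import json
import logging

logger = logging.getLogger(__name__)

def cheap_canonical_json(payload):
    # Stand-in for canonicaljson.encode_canonical_json in this sketch.
    return json.dumps(payload, sort_keys=True, separators=(",", ":"))

def log_signed_payload(payload):
    # Without the guard, cheap_canonical_json() would run even when DEBUG
    # logging is disabled, because it is evaluated before logger.debug is
    # called.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signing event: %s", cheap_canonical_json(payload))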

View File

@@ -511,18 +511,17 @@ class BaseV2KeyFetcher(object):
server_name = response_json["server_name"]
verified = False
for key_id in response_json["signatures"].get(server_name, {}):
# each of the keys used for the signature must be present in the response
# json.
key = verify_keys.get(key_id)
if not key:
# the key may not be present in verify_keys if:
# * we got the key from the notary server, and:
# * the key belongs to the notary server, and:
# * the notary server is using a different key to sign notary
# responses.
continue
raise KeyLookupError(
"Key response is signed by key id %s:%s but that key is not "
"present in the response" % (server_name, key_id)
)
verify_signed_json(response_json, server_name, key.verify_key)
verified = True
break
if not verified:
raise KeyLookupError(

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import logging
from typing import Set, Tuple
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -43,26 +42,12 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
Returns:
if the auth checks pass.
"""
assert isinstance(auth_events, dict)
if do_size_check:
_check_size_limits(event)
if not hasattr(event, "room_id"):
raise AuthError(500, "Event has no room_id: %s" % event)
room_id = event.room_id
# I'm not really expecting to get auth events in the wrong room, but let's
# sanity-check it
for auth_event in auth_events.values():
if auth_event.room_id != room_id:
raise Exception(
"During auth for event %s in room %s, found event %s in the state "
"which is in room %s"
% (event.event_id, room_id, auth_event.event_id, auth_event.room_id)
)
if do_sig_check:
sender_domain = get_domain_from_id(event.sender)
@@ -89,6 +74,12 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
if not event.signatures.get(event_id_domain):
raise AuthError(403, "Event not signed by sending server")
if auth_events is None:
# Oh, we don't know what the state of the room was, so we
# are trusting that this is allowed (at least for now)
logger.warn("Trusting event: %s", event.event_id)
return
if event.type == EventTypes.Create:
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
@@ -634,7 +625,7 @@ def get_public_keys(invite_event):
return public_keys
def auth_types_for_event(event) -> Set[Tuple[str]]:
def auth_types_for_event(event):
"""Given an event, return a list of (EventType, StateKey) that may be
needed to auth the event. The returned list may be a superset of what
would actually be required depending on the full state of the room.
@@ -643,20 +634,20 @@ def auth_types_for_event(event) -> Set[Tuple[str]]:
actually auth the event.
"""
if event.type == EventTypes.Create:
return set()
return []
auth_types = {
auth_types = [
(EventTypes.PowerLevels, ""),
(EventTypes.Member, event.sender),
(EventTypes.Create, ""),
}
]
if event.type == EventTypes.Member:
membership = event.content["membership"]
if membership in [Membership.JOIN, Membership.INVITE]:
auth_types.add((EventTypes.JoinRules, ""))
auth_types.append((EventTypes.JoinRules, ""))
auth_types.add((EventTypes.Member, event.state_key))
auth_types.append((EventTypes.Member, event.state_key))
if membership == Membership.INVITE:
if "third_party_invite" in event.content:
@@ -664,6 +655,6 @@ def auth_types_for_event(event) -> Set[Tuple[str]]:
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"],
)
auth_types.add(key)
auth_types.append(key)
return auth_types
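For a plain join event, the logic above yields the power levels, create, join rules and relevant member entries. A quick illustration with a minimal stand-in event (FakeEvent is purely for this sketch):

from collections import namedtuple

FakeEvent = namedtuple("FakeEvent", ["type", "state_key", "sender", "content"])

join = FakeEvent(
    type="m.room.member",
    state_key="@bob:example.com",
    sender="@bob:example.com",
    content={"membership": "join"},
)

# Following the set-based version above, the auth types for this join are:
expected = {
    ("m.room.power_levels", ""),
    ("m.room.create", ""),
    ("m.room.join_rules", ""),
    ("m.room.member", "@bob:example.com"),  # sender and state_key coincide
}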

View File

@@ -12,125 +12,104 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple, Union
from six import iteritems
import attr
from frozendict import frozendict
from twisted.internet import defer
from synapse.appservice import ApplicationService
from synapse.logging.context import make_deferred_yieldable, run_in_background
@attr.s(slots=True)
class EventContext:
class EventContext(object):
"""
Holds information relevant to persisting an event
Attributes:
rejected: A rejection reason if the event was rejected, else False
state_group (int|None): state group id, if the state has been stored
as a state group. This is usually only None if e.g. the event is
an outlier.
rejected (bool|str): A rejection reason if the event was rejected, else
False
_state_group: The ID of the state group for this event. Note that state events
are persisted with a state group which includes the new event, so this is
effectively the state *after* the event in question.
push_actions (list[(str, list[object])]): list of (user_id, actions)
tuples
For a *rejected* state event, where the state of the rejected event is
ignored, this state_group should never make it into the
event_to_state_groups table. Indeed, inspecting this value for a rejected
state event is almost certainly incorrect.
prev_group (int): Previously persisted state group. ``None`` for an
outlier.
delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
(type, state_key) -> event_id. ``None`` for an outlier.
For an outlier, where we don't have the state at the event, this will be
None.
Note that this is a private attribute: it should be accessed via
the ``state_group`` property.
state_group_before_event: The ID of the state group representing the state
of the room before this event.
If this is a non-state event, this will be the same as ``state_group``. If
it's a state event, it will be the same as ``prev_group``.
If ``state_group`` is None (ie, the event is an outlier),
``state_group_before_event`` will always also be ``None``.
prev_group: If it is known, ``state_group``'s prev_group. Note that this being
None does not necessarily mean that ``state_group`` does not have
a prev_group!
If the event is a state event, this is normally the same as ``prev_group``.
If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
will always also be ``None``.
Note that this *not* (necessarily) the state group associated with
``_prev_state_ids``.
delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
and ``state_group``.
app_service: If this event is being sent by a (local) application service, that
app service.
_current_state_ids: The room state map, including this event - ie, the state
in ``state_group``.
prev_state_events (?): XXX: is this ever set to anything other than
the empty list?
_current_state_ids (dict[(str, str), str]|None):
The current state map including the current event. None if outlier
or we haven't fetched the state from DB yet.
(type, state_key) -> event_id
FIXME: what is this for an outlier? it seems ill-defined. It seems like
it could be either {}, or the state we were given by the remote
server, depending on $THINGS
Note that this is a private attribute: it should be accessed via
``get_current_state_ids``. _AsyncEventContext impl calculates this
on-demand: it will be None until that happens.
_prev_state_ids: The room state map, excluding this event - ie, the state
in ``state_group_before_event``. For a non-state
event, this will be the same as _current_state_events.
Note that it is a completely different thing to prev_group!
_prev_state_ids (dict[(str, str), str]|None):
The current state map excluding the current event. None if outlier
or we haven't fetched the state from DB yet.
(type, state_key) -> event_id
FIXME: again, what is this for an outlier?
_fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
been calculated. None if we haven't started calculating yet
As with _current_state_ids, this is a private attribute. It should be
accessed via get_prev_state_ids.
_event_type (str): The type of the event the context is associated with.
Only set when state has not been fetched yet.
_event_state_key (str|None): The state_key of the event the context is
associated with. Only set when state has not been fetched yet.
_prev_state_id (str|None): If the event associated with the context is
a state event, then `_prev_state_id` is the event_id of the state
that was replaced.
Only set when state has not been fetched yet.
"""
rejected = attr.ib(default=False, type=Union[bool, str])
_state_group = attr.ib(default=None, type=Optional[int])
state_group_before_event = attr.ib(default=None, type=Optional[int])
prev_group = attr.ib(default=None, type=Optional[int])
delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
app_service = attr.ib(default=None, type=Optional[ApplicationService])
__slots__ = [
"state_group",
"rejected",
"prev_group",
"delta_ids",
"prev_state_events",
"app_service",
"_current_state_ids",
"_prev_state_ids",
"_prev_state_id",
"_event_type",
"_event_state_key",
"_fetching_state_deferred",
]
_current_state_ids = attr.ib(
default=None, type=Optional[Dict[Tuple[str, str], str]]
)
_prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
def __init__(self):
self.prev_state_events = []
self.rejected = False
self.app_service = None
@staticmethod
def with_state(
state_group,
state_group_before_event,
current_state_ids,
prev_state_ids,
prev_group=None,
delta_ids=None,
state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
):
return EventContext(
current_state_ids=current_state_ids,
prev_state_ids=prev_state_ids,
state_group=state_group,
state_group_before_event=state_group_before_event,
prev_group=prev_group,
delta_ids=delta_ids,
)
context = EventContext()
# The current state including the current event
context._current_state_ids = current_state_ids
# The current state excluding the current event
context._prev_state_ids = prev_state_ids
context.state_group = state_group
context._prev_state_id = None
context._event_type = None
context._event_state_key = None
context._fetching_state_deferred = defer.succeed(None)
# A previously persisted state group and a delta between that
# and this state.
context.prev_group = prev_group
context.delta_ids = delta_ids
return context
@defer.inlineCallbacks
def serialize(self, event, store):
@@ -149,7 +128,7 @@ class EventContext:
# the prev_state_ids, so if we're a state event we include the event
# id that we replaced in the state.
if event.is_state():
prev_state_ids = yield self.get_prev_state_ids()
prev_state_ids = yield self.get_prev_state_ids(store)
prev_state_id = prev_state_ids.get((event.type, event.state_key))
else:
prev_state_id = None
@@ -158,93 +137,74 @@ class EventContext:
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.state_key if event.is_state() else None,
"state_group": self._state_group,
"state_group_before_event": self.state_group_before_event,
"state_group": self.state_group,
"rejected": self.rejected,
"prev_group": self.prev_group,
"delta_ids": _encode_state_dict(self.delta_ids),
"prev_state_events": self.prev_state_events,
"app_service_id": self.app_service.id if self.app_service else None,
}
@staticmethod
def deserialize(storage, input):
def deserialize(store, input):
"""Converts a dict that was produced by `serialize` back into a
EventContext.
Args:
storage (Storage): Used to convert AS ID to AS object and fetch
state.
store (DataStore): Used to convert AS ID to AS object
input (dict): A dict produced by `serialize`
Returns:
EventContext
"""
context = _AsyncEventContextImpl(
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
storage=storage,
prev_state_id=input["prev_state_id"],
event_type=input["event_type"],
event_state_key=input["event_state_key"],
state_group=input["state_group"],
state_group_before_event=input["state_group_before_event"],
prev_group=input["prev_group"],
delta_ids=_decode_state_dict(input["delta_ids"]),
rejected=input["rejected"],
)
context = EventContext()
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
context._prev_state_id = input["prev_state_id"]
context._event_type = input["event_type"]
context._event_state_key = input["event_state_key"]
context._current_state_ids = None
context._prev_state_ids = None
context._fetching_state_deferred = None
context.state_group = input["state_group"]
context.prev_group = input["prev_group"]
context.delta_ids = _decode_state_dict(input["delta_ids"])
context.rejected = input["rejected"]
context.prev_state_events = input["prev_state_events"]
app_service_id = input["app_service_id"]
if app_service_id:
context.app_service = storage.main.get_app_service_by_id(app_service_id)
context.app_service = store.get_app_service_by_id(app_service_id)
return context
@property
def state_group(self) -> Optional[int]:
"""The ID of the state group for this event.
Note that state events are persisted with a state group which includes the new
event, so this is effectively the state *after* the event in question.
For an outlier, where we don't have the state at the event, this will be None.
It is an error to access this for a rejected event, since rejected state should
not make it into the room state. Accessing this property will raise an exception
if ``rejected`` is set.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_group of rejected event")
return self._state_group
@defer.inlineCallbacks
def get_current_state_ids(self):
"""
Gets the room state map, including this event - ie, the state in ``state_group``
It is an error to access this for a rejected event, since rejected state should
not make it into the room state. This method will raise an exception if
``rejected`` is set.
def get_current_state_ids(self, store):
"""Gets the current state IDs
Returns:
Deferred[dict[(str, str), str]|None]: Returns None if state_group
is None, which happens when the associated event is an outlier.
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")
yield self._ensure_fetched()
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(
self._fill_out_state, store
)
yield make_deferred_yieldable(self._fetching_state_deferred)
return self._current_state_ids
@defer.inlineCallbacks
def get_prev_state_ids(self):
"""
Gets the room state map, excluding this event.
For a non-state event, this will be the same as get_current_state_ids().
def get_prev_state_ids(self, store):
"""Gets the prev state IDs
Returns:
Deferred[dict[(str, str), str]|None]: Returns None if state_group
@@ -252,77 +212,36 @@ class EventContext:
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
yield self._ensure_fetched()
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(
self._fill_out_state, store
)
yield make_deferred_yieldable(self._fetching_state_deferred)
return self._prev_state_ids
def get_cached_current_state_ids(self):
"""Gets the current state IDs if we have them already cached.
It is an error to access this for a rejected event, since rejected state should
not make it into the room state. This method will raise an exception if
``rejected`` is set.
Returns:
dict[(str, str), str]|None: Returns None if we haven't cached the
state or if state_group is None, which happens when the associated
event is an outlier.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")
return self._current_state_ids
def _ensure_fetched(self):
return defer.succeed(None)
@attr.s(slots=True)
class _AsyncEventContextImpl(EventContext):
"""
An implementation of EventContext which fetches _current_state_ids and
_prev_state_ids from the database on demand.
Attributes:
_storage (Storage)
_fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
been calculated. None if we haven't started calculating yet
_event_type (str): The type of the event the context is associated with.
_event_state_key (str): The state_key of the event the context is
associated with.
_prev_state_id (str|None): If the event associated with the context is
a state event, then `_prev_state_id` is the event_id of the state
that was replaced.
"""
# This needs to have a default as we're inheriting
_storage = attr.ib(default=None)
_prev_state_id = attr.ib(default=None)
_event_type = attr.ib(default=None)
_event_state_key = attr.ib(default=None)
_fetching_state_deferred = attr.ib(default=None)
def _ensure_fetched(self):
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(self._fill_out_state)
return make_deferred_yieldable(self._fetching_state_deferred)
@defer.inlineCallbacks
def _fill_out_state(self):
def _fill_out_state(self, store):
"""Called to populate the _current_state_ids and _prev_state_ids
attributes by loading from the database.
"""
if self.state_group is None:
return
self._current_state_ids = yield self._storage.state.get_state_ids_for_group(
self.state_group
)
self._current_state_ids = yield store.get_state_ids_for_group(self.state_group)
if self._prev_state_id and self._event_state_key is not None:
self._prev_state_ids = dict(self._current_state_ids)
@@ -331,6 +250,27 @@ class _AsyncEventContextImpl(EventContext):
else:
self._prev_state_ids = self._current_state_ids
@defer.inlineCallbacks
def update_state(
self, state_group, prev_state_ids, current_state_ids, prev_group, delta_ids
):
"""Replace the state in the context
"""
# We need to make sure we wait for any ongoing fetching of state
# to complete so that the updated state doesn't get clobbered
if self._fetching_state_deferred:
yield make_deferred_yieldable(self._fetching_state_deferred)
self.state_group = state_group
self._prev_state_ids = prev_state_ids
self.prev_group = prev_group
self._current_state_ids = current_state_ids
self.delta_ids = delta_ids
# We need to ensure that we've marked as having fetched the state
self._fetching_state_deferred = defer.succeed(None)
def _encode_state_dict(state_dict):
"""Since dicts of (type, state_key) -> event_id cannot be serialized in

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,10 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from synapse.spam_checker_api import SpamCheckerApi
class SpamChecker(object):
def __init__(self, hs):
@@ -31,14 +26,7 @@ class SpamChecker(object):
pass
if module is not None:
# Older spam checkers don't accept the `api` argument, so we
# try and detect support.
spam_args = inspect.getfullargspec(module)
if "api" in spam_args.args:
api = SpamCheckerApi(hs)
self.spam_checker = module(config=config, api=api)
else:
self.spam_checker = module(config=config)
self.spam_checker = module(config=config)
def check_event_for_spam(self, event):
"""Checks if a given event is considered "spammy" by this server.

View File

@@ -53,7 +53,7 @@ class ThirdPartyEventRules(object):
if self.third_party_rules is None:
return True
prev_state_ids = yield context.get_prev_state_ids()
prev_state_ids = yield context.get_prev_state_ids(self.store)
# Retrieve the state events from the database.
state_events = {}

View File

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from six import integer_types, string_types
from six import string_types
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -22,12 +22,11 @@ from synapse.types import EventID, RoomID, UserID
class EventValidator(object):
def validate_new(self, event, config):
def validate_new(self, event):
"""Validates the event has roughly the right format
Args:
event (FrozenEvent): The event to validate.
config (Config): The homeserver's configuration.
event (FrozenEvent)
"""
self.validate_builder(event)
@@ -68,99 +67,6 @@ class EventValidator(object):
Codes.INVALID_PARAM,
)
if event.type == EventTypes.Retention:
self._validate_retention(event, config)
def _validate_retention(self, event, config):
"""Checks that an event that defines the retention policy for a room respects the
boundaries imposed by the server's administrator.
Args:
event (FrozenEvent): The event to validate.
config (Config): The homeserver's configuration.
"""
min_lifetime = event.content.get("min_lifetime")
max_lifetime = event.content.get("max_lifetime")
if min_lifetime is not None:
if not isinstance(min_lifetime, integer_types):
raise SynapseError(
code=400,
msg="'min_lifetime' must be an integer",
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_min is not None
and min_lifetime < config.retention_allowed_lifetime_min
):
raise SynapseError(
code=400,
msg=(
"'min_lifetime' can't be lower than the minimum allowed"
" value enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_max is not None
and min_lifetime > config.retention_allowed_lifetime_max
):
raise SynapseError(
code=400,
msg=(
"'min_lifetime' can't be greater than the maximum allowed"
" value enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if max_lifetime is not None:
if not isinstance(max_lifetime, integer_types):
raise SynapseError(
code=400,
msg="'max_lifetime' must be an integer",
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_min is not None
and max_lifetime < config.retention_allowed_lifetime_min
):
raise SynapseError(
code=400,
msg=(
"'max_lifetime' can't be lower than the minimum allowed value"
" enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_max is not None
and max_lifetime > config.retention_allowed_lifetime_max
):
raise SynapseError(
code=400,
msg=(
"'max_lifetime' can't be greater than the maximum allowed"
" value enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if (
min_lifetime is not None
and max_lifetime is not None
and min_lifetime > max_lifetime
):
raise SynapseError(
code=400,
msg="'min_lifetime' can't be greater than 'max_lifetime",
errcode=Codes.BAD_JSON,
)
def validate_builder(self, event):
"""Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the

View File

@@ -102,7 +102,7 @@ class FederationBase(object):
pass
if not res:
logger.warning(
logger.warn(
"Failed to find copy of %s with valid signature", pdu.event_id
)
@@ -173,7 +173,7 @@ class FederationBase(object):
return redacted_event
if self.spam_checker.check_event_for_spam(pdu):
logger.warning(
logger.warn(
"Event contains spam, redacting %s: %s",
pdu.event_id,
pdu.get_pdu_json(),
@@ -185,7 +185,7 @@ class FederationBase(object):
def errback(failure, pdu):
failure.trap(SynapseError)
with PreserveLoggingContext(ctx):
logger.warning(
logger.warn(
"Signature check failed for %s: %s",
pdu.event_id,
failure.getErrorMessage(),

View File

@@ -18,6 +18,8 @@ import copy
import itertools
import logging
from six.moves import range
from prometheus_client import Counter
from twisted.internet import defer
@@ -37,7 +39,7 @@ from synapse.api.room_versions import (
)
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.utils import log_function
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
@@ -175,7 +177,7 @@ class FederationClient(FederationBase):
given destination server.
Args:
dest (str): The remote homeserver to ask.
dest (str): The remote home server to ask.
room_id (str): The room_id to backfill.
limit (int): The maximum number of PDUs to return.
extremities (list): List of PDU id and origins of the first pdus
@@ -194,7 +196,7 @@ class FederationClient(FederationBase):
dest, room_id, extremities, limit
)
logger.debug("backfill transaction_data=%r", transaction_data)
logger.debug("backfill transaction_data=%s", repr(transaction_data))
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
@@ -225,7 +227,7 @@ class FederationClient(FederationBase):
one succeeds.
Args:
destinations (list): Which homeservers to query
destinations (list): Which home servers to query
event_id (str): event to fetch
room_version (str): version of the room
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
@@ -308,26 +310,162 @@ class FederationClient(FederationBase):
return signed_pdu
@defer.inlineCallbacks
def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
"""Calls the /state_ids endpoint to fetch the state at a particular point
in the room, and the auth events for the given event
@log_function
def get_state_for_room(self, destination, room_id, event_id):
"""Requests all of the room state at a given event from a remote home server.
Args:
destination (str): The remote homeserver to query for the state.
room_id (str): The id of the room we're interested in.
event_id (str): The id of the event we want the state at.
Returns:
Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
Deferred[Tuple[List[EventBase], List[EventBase]]]:
A list of events in the state, and a list of events in the auth chain
for the given event.
"""
result = yield self.transport_layer.get_room_state_ids(
try:
# First we try and ask for just the IDs, as that's far quicker if
# we have most of the state and auth_chain already.
# However, this may 404 if the other side has an old synapse.
result = yield self.transport_layer.get_room_state_ids(
destination, room_id, event_id=event_id
)
state_event_ids = result["pdu_ids"]
auth_event_ids = result.get("auth_chain_ids", [])
fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
destination, room_id, set(state_event_ids + auth_event_ids)
)
if failed_to_fetch:
logger.warning(
"Failed to fetch missing state/auth events for %s: %s",
room_id,
failed_to_fetch,
)
event_map = {ev.event_id: ev for ev in fetched_events}
pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
auth_chain = [
event_map[e_id] for e_id in auth_event_ids if e_id in event_map
]
auth_chain.sort(key=lambda e: e.depth)
return pdus, auth_chain
except HttpResponseException as e:
if e.code == 400 or e.code == 404:
logger.info("Failed to use get_room_state_ids API, falling back")
else:
raise e
result = yield self.transport_layer.get_room_state(
destination, room_id, event_id=event_id
)
state_event_ids = result["pdu_ids"]
auth_event_ids = result.get("auth_chain_ids", [])
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
if not isinstance(state_event_ids, list) or not isinstance(
auth_event_ids, list
):
raise Exception("invalid response from /state_ids")
pdus = [
event_from_pdu_json(p, format_ver, outlier=True) for p in result["pdus"]
]
return state_event_ids, auth_event_ids
auth_chain = [
event_from_pdu_json(p, format_ver, outlier=True)
for p in result.get("auth_chain", [])
]
seen_events = yield self.store.get_events(
[ev.event_id for ev in itertools.chain(pdus, auth_chain)]
)
signed_pdus = yield self._check_sigs_and_hash_and_fetch(
destination,
[p for p in pdus if p.event_id not in seen_events],
outlier=True,
room_version=room_version,
)
signed_pdus.extend(
seen_events[p.event_id] for p in pdus if p.event_id in seen_events
)
signed_auth = yield self._check_sigs_and_hash_and_fetch(
destination,
[p for p in auth_chain if p.event_id not in seen_events],
outlier=True,
room_version=room_version,
)
signed_auth.extend(
seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
)
signed_auth.sort(key=lambda e: e.depth)
return signed_pdus, signed_auth
@defer.inlineCallbacks
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
"""Fetch events from a remote destination, checking if we already have them.
Args:
destination (str)
room_id (str)
event_ids (list)
Returns:
Deferred: A deferred resolving to a 2-tuple where the first is a list of
events and the second is a list of event ids that we failed to fetch.
"""
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
signed_events = list(seen_events.values())
failed_to_fetch = set()
missing_events = set(event_ids)
for k in seen_events:
missing_events.discard(k)
if not missing_events:
return signed_events, failed_to_fetch
logger.debug(
"Fetching unknown state/auth events %s for room %s",
missing_events,
room_id,
)
room_version = yield self.store.get_room_version(room_id)
batch_size = 20
missing_events = list(missing_events)
for i in range(0, len(missing_events), batch_size):
batch = set(missing_events[i : i + batch_size])
deferreds = [
run_in_background(
self.get_pdu,
destinations=[destination],
event_id=e_id,
room_version=room_version,
)
for e_id in batch
]
res = yield make_deferred_yieldable(
defer.DeferredList(deferreds, consumeErrors=True)
)
for success, result in res:
if success and result:
signed_events.append(result)
batch.discard(result.event_id)
# We removed all events we successfully fetched from `batch`
failed_to_fetch.update(batch)
return signed_events, failed_to_fetch
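The helper above walks the missing event IDs in fixed-size batches (20 per round trip) and issues the per-event fetches concurrently within each batch. A tiny sketch of just the batching shape:

def batches(items, batch_size=20):
    # Yield fixed-size chunks, mirroring the slicing loop above.
    items = list(items)
    for i in range(0, len(items), batch_size):
        yield set(items[i : i + batch_size])

missing = ["$ev%d" % n for n in range(45)]
print([len(b) for b in batches(missing)])  # [20, 20, 5]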
@defer.inlineCallbacks
@log_function
@@ -384,12 +522,12 @@ class FederationClient(FederationBase):
res = yield callback(destination)
return res
except InvalidResponseError as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
logger.warn("Failed to %s via %s: %s", description, destination, e)
except HttpResponseException as e:
if not 500 <= e.code < 600:
raise e.to_synapse_error()
else:
logger.warning(
logger.warn(
"Failed to %s via %s: %i %s",
description,
destination,
@@ -397,9 +535,7 @@ class FederationClient(FederationBase):
e.args[0],
)
except Exception:
logger.warning(
"Failed to %s via %s", description, destination, exc_info=1
)
logger.warn("Failed to %s via %s", description, destination, exc_info=1)
raise SynapseError(502, "Failed to %s via any server" % (description,))
@@ -417,7 +553,7 @@ class FederationClient(FederationBase):
Note that this does not append any events to any graphs.
Args:
destinations (Iterable[str]): Candidate homeservers which are probably
destinations (str): Candidate homeservers which are probably
participating in the room.
room_id (str): The room in which the event will happen.
user_id (str): The user whose membership is being evented.
@@ -526,7 +662,13 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
def send_request(destination):
content = yield self._do_send_join(destination, pdu)
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_join(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
logger.debug("Got content: %s", content)
@@ -593,44 +735,6 @@ class FederationClient(FederationBase):
return self._try_destination_list("send_join", destinations, send_request)
@defer.inlineCallbacks
def _do_send_join(self, destination, pdu):
time_now = self._clock.time_msec()
try:
content = yield self.transport_layer.send_join_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
return content
except HttpResponseException as e:
if e.code in [400, 404]:
err = e.to_synapse_error()
# If we receive an error response that isn't a generic error, or an
# unrecognised endpoint error, we assume that the remote understands
# the v2 invite API and this is a legitimate error.
if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
raise err
else:
raise e.to_synapse_error()
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
resp = yield self.transport_layer.send_join_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
# We expect the v1 API to respond with [200, content], so we only return the
# content.
return resp[1]
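The send_join logic above prefers the v2 endpoint and only falls back to v1 when the failure looks like the remote simply doesn't implement v2 (a 400/404 with an M_UNKNOWN or M_UNRECOGNIZED errcode). A standalone sketch of that fallback shape; HttpError, send_v2 and send_v1 are hypothetical stand-ins for the transport-layer pieces:

class HttpError(Exception):
    def __init__(self, code, errcode):
        super().__init__(code)
        self.code = code
        self.errcode = errcode

def send_with_fallback(send_v2, send_v1):
    # Try the v2 endpoint first; fall back to v1 only when the response
    # suggests the remote doesn't know the endpoint, as the code above does.
    try:
        return send_v2()
    except HttpError as e:
        if e.code in (400, 404) and e.errcode in ("M_UNKNOWN", "M_UNRECOGNIZED"):
            # The v1 API responds with a [200, content] pair, so unwrap it.
            return send_v1()[1]
        raise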
@defer.inlineCallbacks
def send_invite(self, destination, room_id, event_id, pdu):
room_version = yield self.store.get_room_version(room_id)
@@ -740,50 +844,18 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
def send_request(destination):
content = yield self._do_send_leave(destination, pdu)
logger.debug("Got content: %s", content)
return None
return self._try_destination_list("send_leave", destinations, send_request)
@defer.inlineCallbacks
def _do_send_leave(self, destination, pdu):
time_now = self._clock.time_msec()
try:
content = yield self.transport_layer.send_leave_v2(
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_leave(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
return content
except HttpResponseException as e:
if e.code in [400, 404]:
err = e.to_synapse_error()
logger.debug("Got content: %s", content)
return None
# If we receive an error response that isn't a generic error, or an
# unrecognised endpoint error, we assume that the remote understands
# the v2 invite API and this is a legitimate error.
if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
raise err
else:
raise e.to_synapse_error()
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
resp = yield self.transport_layer.send_leave_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
# We expect the v1 API to respond with [200, content], so we only return the
# content.
return resp[1]
return self._try_destination_list("send_leave", destinations, send_request)
def get_public_rooms(
self,

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,6 +21,7 @@ from six import iteritems
from canonicaljson import json
from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
@@ -74,7 +74,6 @@ class FederationServer(FederationBase):
self.auth = hs.get_auth()
self.handler = hs.get_handlers().federation_handler
self.state = hs.get_state_handler()
self._server_linearizer = Linearizer("fed_server")
self._transaction_linearizer = Linearizer("fed_txn_handler")
@@ -87,12 +86,14 @@ class FederationServer(FederationBase):
# come in waves.
self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)
async def on_backfill_request(self, origin, room_id, versions, limit):
with (await self._server_linearizer.queue((origin, room_id))):
@defer.inlineCallbacks
@log_function
def on_backfill_request(self, origin, room_id, versions, limit):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
pdus = await self.handler.on_backfill_request(
pdus = yield self.handler.on_backfill_request(
origin, room_id, versions, limit
)
@@ -100,7 +101,9 @@ class FederationServer(FederationBase):
return 200, res
async def on_incoming_transaction(self, origin, transaction_data):
@defer.inlineCallbacks
@log_function
def on_incoming_transaction(self, origin, transaction_data):
# keep this as early as possible to make the calculated origin ts as
# accurate as possible.
request_time = self._clock.time_msec()
@@ -115,17 +118,18 @@ class FederationServer(FederationBase):
# use a linearizer to ensure that we don't process the same transaction
# multiple times in parallel.
with (
await self._transaction_linearizer.queue(
yield self._transaction_linearizer.queue(
(origin, transaction.transaction_id)
)
):
result = await self._handle_incoming_transaction(
result = yield self._handle_incoming_transaction(
origin, transaction, request_time
)
return result
async def _handle_incoming_transaction(self, origin, transaction, request_time):
@defer.inlineCallbacks
def _handle_incoming_transaction(self, origin, transaction, request_time):
""" Process an incoming transaction and return the HTTP response
Args:
@@ -136,7 +140,7 @@ class FederationServer(FederationBase):
Returns:
Deferred[(int, object)]: http response code and body
"""
response = await self.transaction_actions.have_responded(origin, transaction)
response = yield self.transaction_actions.have_responded(origin, transaction)
if response:
logger.debug(
@@ -147,7 +151,7 @@ class FederationServer(FederationBase):
logger.debug("[%s] Transaction is new", transaction.transaction_id)
# Reject if PDU count > 50 or EDU count > 100
# Reject if PDU count > 50 and EDU count > 100
if len(transaction.pdus) > 50 or (
hasattr(transaction, "edus") and len(transaction.edus) > 100
):
@@ -155,7 +159,7 @@ class FederationServer(FederationBase):
logger.info("Transaction PDU or EDU count too large. Returning 400")
response = {}
await self.transaction_actions.set_response(
yield self.transaction_actions.set_response(
origin, transaction, 400, response
)
return 400, response
@@ -191,7 +195,7 @@ class FederationServer(FederationBase):
continue
try:
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
except NotFoundError:
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
continue
@@ -217,12 +221,13 @@ class FederationServer(FederationBase):
# require callouts to other servers to fetch missing events), but
# impose a limit to avoid going too crazy with ram/cpu.
async def process_pdus_for_room(room_id):
@defer.inlineCallbacks
def process_pdus_for_room(room_id):
logger.debug("Processing PDUs for %s", room_id)
try:
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
except AuthError as e:
logger.warning("Ignoring PDUs for room %s from banned server", room_id)
logger.warn("Ignoring PDUs for room %s from banned server", room_id)
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
pdu_results[event_id] = e.error_dict()
@@ -232,10 +237,10 @@ class FederationServer(FederationBase):
event_id = pdu.event_id
with nested_logging_context(event_id):
try:
await self._handle_received_pdu(origin, pdu)
yield self._handle_received_pdu(origin, pdu)
pdu_results[event_id] = {}
except FederationError as e:
logger.warning("Error handling PDU %s: %s", event_id, e)
logger.warn("Error handling PDU %s: %s", event_id, e)
pdu_results[event_id] = {"error": str(e)}
except Exception as e:
f = failure.Failure()
@@ -246,30 +251,36 @@ class FederationServer(FederationBase):
exc_info=(f.type, f.value, f.getTracebackObject()),
)
await concurrently_execute(
yield concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
)
if hasattr(transaction, "edus"):
for edu in (Edu(**x) for x in transaction.edus):
await self.received_edu(origin, edu.edu_type, edu.content)
yield self.received_edu(origin, edu.edu_type, edu.content)
response = {"pdus": pdu_results}
logger.debug("Returning: %s", str(response))
await self.transaction_actions.set_response(origin, transaction, 200, response)
yield self.transaction_actions.set_response(origin, transaction, 200, response)
return 200, response
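
process_pdus_for_room above is fanned out with concurrently_execute under TRANSACTION_CONCURRENCY_LIMIT, so PDUs for different rooms are handled in parallel but only a bounded number of rooms are processed at once. An asyncio approximation of that helper, assuming semaphore-style limiting (the real signature lives in synapse.util.async_helpers and may differ):

import asyncio

TRANSACTION_CONCURRENCY_LIMIT = 10


async def concurrently_execute(func, args, limit):
    """Run func over every element of args, with at most `limit` calls in flight."""
    sem = asyncio.Semaphore(limit)

    async def _one(arg):
        async with sem:
            await func(arg)

    await asyncio.gather(*(_one(a) for a in args))


async def main():
    processed = []

    async def process_pdus_for_room(room_id):
        await asyncio.sleep(0.01)  # placeholder for the real per-room PDU handling
        processed.append(room_id)

    await concurrently_execute(
        process_pdus_for_room, ["!a:hs", "!b:hs", "!c:hs"], TRANSACTION_CONCURRENCY_LIMIT
    )
    assert sorted(processed) == ["!a:hs", "!b:hs", "!c:hs"]


asyncio.run(main())
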
async def received_edu(self, origin, edu_type, content):
@defer.inlineCallbacks
def received_edu(self, origin, edu_type, content):
received_edus_counter.inc()
await self.registry.on_edu(edu_type, origin, content)
yield self.registry.on_edu(edu_type, origin, content)
@defer.inlineCallbacks
@log_function
def on_context_state_request(self, origin, room_id, event_id):
if not event_id:
raise NotImplementedError("Specify an event")
async def on_context_state_request(self, origin, room_id, event_id):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
in_room = await self.auth.check_host_in_room(room_id, origin)
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -278,79 +289,75 @@ class FederationServer(FederationBase):
# in the cache so we could return it without waiting for the linearizer
# - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer.
with (await self._server_linearizer.queue((origin, room_id))):
resp = dict(
await self._state_resp_cache.wrap(
(room_id, event_id),
self._on_context_state_request_compute,
room_id,
event_id,
)
with (yield self._server_linearizer.queue((origin, room_id))):
resp = yield self._state_resp_cache.wrap(
(room_id, event_id),
self._on_context_state_request_compute,
room_id,
event_id,
)
room_version = await self.store.get_room_version(room_id)
resp["room_version"] = room_version
return 200, resp
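
The _state_resp_cache.wrap(...) call above deduplicates concurrent /state requests for the same (room_id, event_id): if a computation is already in flight, later callers share its result instead of starting another one. A small stand-in for that behaviour (ResponseCache itself lives in synapse.util.caches.response_cache; this sketch only models the in-flight sharing):

import asyncio


class InFlightCache:
    """Share the result of an in-flight computation between callers asking
    for the same key; a stand-in for ResponseCache.wrap, illustrative only."""

    def __init__(self):
        self._pending = {}

    async def wrap(self, key, func, *args):
        if key not in self._pending:
            task = asyncio.ensure_future(func(*args))
            task.add_done_callback(lambda _: self._pending.pop(key, None))
            self._pending[key] = task
        # shield() lets one caller be cancelled without cancelling the shared task.
        return await asyncio.shield(self._pending[key])


async def main():
    cache = InFlightCache()

    async def compute_state(room_id, event_id):
        await asyncio.sleep(0.01)  # placeholder for the expensive state computation
        return {"pdus": [], "auth_chain": []}

    # Two concurrent requests for the same key share a single computation.
    a, b = await asyncio.gather(
        cache.wrap(("!r:hs", "$e"), compute_state, "!r:hs", "$e"),
        cache.wrap(("!r:hs", "$e"), compute_state, "!r:hs", "$e"),
    )
    assert a == b


asyncio.run(main())
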
async def on_state_ids_request(self, origin, room_id, event_id):
@defer.inlineCallbacks
def on_state_ids_request(self, origin, room_id, event_id):
if not event_id:
raise NotImplementedError("Specify an event")
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
in_room = await self.auth.check_host_in_room(room_id, origin)
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
async def _on_context_state_request_compute(self, room_id, event_id):
if event_id:
pdus = await self.handler.get_state_for_pdu(room_id, event_id)
else:
pdus = (await self.state.get_current_state(room_id)).values()
auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
@defer.inlineCallbacks
def _on_context_state_request_compute(self, room_id, event_id):
pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])
return {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
}
async def on_pdu_request(self, origin, event_id):
pdu = await self.handler.get_persisted_pdu(origin, event_id)
@defer.inlineCallbacks
@log_function
def on_pdu_request(self, origin, event_id):
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
if pdu:
return 200, self._transaction_from_pdus([pdu]).get_dict()
else:
return 404, ""
async def on_query_request(self, query_type, args):
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
resp = await self.registry.on_query(query_type, args)
resp = yield self.registry.on_query(query_type, args)
return 200, resp
async def on_make_join_request(self, origin, room_id, user_id, supported_versions):
@defer.inlineCallbacks
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
if room_version not in supported_versions:
logger.warning(
"Room version %s not in %s", room_version, supported_versions
)
logger.warn("Room version %s not in %s", room_version, supported_versions)
raise IncompatibleRoomVersionError(room_version=room_version)
pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
async def on_invite_request(self, origin, content, room_version):
@defer.inlineCallbacks
def on_invite_request(self, origin, content, room_version):
if room_version not in KNOWN_ROOM_VERSIONS:
raise SynapseError(
400,
@@ -362,71 +369,81 @@ class FederationServer(FederationBase):
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
pdu = await self._check_sigs_and_hash(room_version, pdu)
ret_pdu = await self.handler.on_invite_request(origin, pdu)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
ret_pdu = yield self.handler.on_invite_request(origin, pdu)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
async def on_send_join_request(self, origin, content, room_id):
@defer.inlineCallbacks
def on_send_join_request(self, origin, content, room_id):
logger.debug("on_send_join_request: content: %s", content)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
pdu = await self._check_sigs_and_hash(room_version, pdu)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
res_pdus = await self.handler.on_send_join_request(origin, pdu)
res_pdus = yield self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
return {
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
"auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
}
return (
200,
{
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
"auth_chain": [
p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
],
},
)
async def on_make_leave_request(self, origin, room_id, user_id):
@defer.inlineCallbacks
def on_make_leave_request(self, origin, room_id, user_id):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)
yield self.check_server_matches_acl(origin_host, room_id)
pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
async def on_send_leave_request(self, origin, content, room_id):
@defer.inlineCallbacks
def on_send_leave_request(self, origin, content, room_id):
logger.debug("on_send_leave_request: content: %s", content)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
pdu = event_from_pdu_json(content, format_ver)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
yield self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
pdu = await self._check_sigs_and_hash(room_version, pdu)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
await self.handler.on_send_leave_request(origin, pdu)
return {}
yield self.handler.on_send_leave_request(origin, pdu)
return 200, {}
async def on_event_auth(self, origin, room_id, event_id):
with (await self._server_linearizer.queue((origin, room_id))):
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
time_now = self._clock.time_msec()
auth_pdus = await self.handler.on_event_auth(event_id)
auth_pdus = yield self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
return 200, res
async def on_query_auth_request(self, origin, content, room_id, event_id):
@defer.inlineCallbacks
def on_query_auth_request(self, origin, content, room_id, event_id):
"""
Content is a dict with keys::
auth_chain (list): A list of events that give the auth chain.
@@ -445,22 +462,22 @@ class FederationServer(FederationBase):
Returns:
Deferred: Results in `dict` with the same format as `content`
"""
with (await self._server_linearizer.queue((origin, room_id))):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
room_version = await self.store.get_room_version(room_id)
room_version = yield self.store.get_room_version(room_id)
format_ver = room_version_to_event_format(room_version)
auth_chain = [
event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
]
signed_auth = await self._check_sigs_and_hash_and_fetch(
signed_auth = yield self._check_sigs_and_hash_and_fetch(
origin, auth_chain, outlier=True, room_version=room_version
)
ret = await self.handler.on_query_auth(
ret = yield self.handler.on_query_auth(
origin,
event_id,
room_id,
@@ -486,14 +503,16 @@ class FederationServer(FederationBase):
return self.on_query_request("user_devices", user_id)
@trace
async def on_claim_client_keys(self, origin, content):
@defer.inlineCallbacks
@log_function
def on_claim_client_keys(self, origin, content):
query = []
for user_id, device_keys in content.get("one_time_keys", {}).items():
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = await self.store.claim_e2e_one_time_keys(query)
results = yield self.store.claim_e2e_one_time_keys(query)
json_result = {}
for user_id, device_keys in results.items():
@@ -517,12 +536,14 @@ class FederationServer(FederationBase):
return {"one_time_keys": json_result}
async def on_get_missing_events(
@defer.inlineCallbacks
@log_function
def on_get_missing_events(
self, origin, room_id, earliest_events, latest_events, limit
):
with (await self._server_linearizer.queue((origin, room_id))):
with (yield self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
yield self.check_server_matches_acl(origin_host, room_id)
logger.info(
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
@@ -532,7 +553,7 @@ class FederationServer(FederationBase):
limit,
)
missing_events = await self.handler.on_get_missing_events(
missing_events = yield self.handler.on_get_missing_events(
origin, room_id, earliest_events, latest_events, limit
)
@@ -565,7 +586,8 @@ class FederationServer(FederationBase):
destination=None,
)
async def _handle_received_pdu(self, origin, pdu):
@defer.inlineCallbacks
def _handle_received_pdu(self, origin, pdu):
""" Process a PDU received in a federation /send/ transaction.
If the event is invalid, then this method throws a FederationError.
@@ -618,34 +640,37 @@ class FederationServer(FederationBase):
logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)
# We've already checked that we know the room version by this point
room_version = await self.store.get_room_version(pdu.room_id)
room_version = yield self.store.get_room_version(pdu.room_id)
# Check signature.
try:
pdu = await self._check_sigs_and_hash(room_version, pdu)
pdu = yield self._check_sigs_and_hash(room_version, pdu)
except SynapseError as e:
raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
def __str__(self):
return "<ReplicationLayer(%s)>" % self.server_name
async def exchange_third_party_invite(
@defer.inlineCallbacks
def exchange_third_party_invite(
self, sender_user_id, target_user_id, room_id, signed
):
ret = await self.handler.exchange_third_party_invite(
ret = yield self.handler.exchange_third_party_invite(
sender_user_id, target_user_id, room_id, signed
)
return ret
async def on_exchange_third_party_invite_request(self, room_id, event_dict):
ret = await self.handler.on_exchange_third_party_invite_request(
@defer.inlineCallbacks
def on_exchange_third_party_invite_request(self, room_id, event_dict):
ret = yield self.handler.on_exchange_third_party_invite_request(
room_id, event_dict
)
return ret
async def check_server_matches_acl(self, server_name, room_id):
@defer.inlineCallbacks
def check_server_matches_acl(self, server_name, room_id):
"""Check if the given server is allowed by the server ACLs in the room
Args:
@@ -655,13 +680,13 @@ class FederationServer(FederationBase):
Raises:
AuthError if the server does not match the ACL
"""
state_ids = await self.store.get_current_state_ids(room_id)
state_ids = yield self.store.get_current_state_ids(room_id)
acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
if not acl_event_id:
return
acl_event = await self.store.get_event(acl_event_id)
acl_event = yield self.store.get_event(acl_event_id)
if server_matches_acl_event(server_name, acl_event):
return
@@ -684,7 +709,7 @@ def server_matches_acl_event(server_name, acl_event):
# server name is a literal IP
allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
if not isinstance(allow_ip_literals, bool):
logger.warning("Ignorning non-bool allow_ip_literals flag")
logger.warn("Ignorning non-bool allow_ip_literals flag")
allow_ip_literals = True
if not allow_ip_literals:
# check for ipv6 literals. These start with '['.
@@ -698,7 +723,7 @@ def server_matches_acl_event(server_name, acl_event):
# next, check the deny list
deny = acl_event.content.get("deny", [])
if not isinstance(deny, (list, tuple)):
logger.warning("Ignorning non-list deny ACL %s", deny)
logger.warn("Ignorning non-list deny ACL %s", deny)
deny = []
for e in deny:
if _acl_entry_matches(server_name, e):
@@ -708,7 +733,7 @@ def server_matches_acl_event(server_name, acl_event):
# then the allow list.
allow = acl_event.content.get("allow", [])
if not isinstance(allow, (list, tuple)):
logger.warning("Ignorning non-list allow ACL %s", allow)
logger.warn("Ignorning non-list allow ACL %s", allow)
allow = []
for e in allow:
if _acl_entry_matches(server_name, e):
@@ -722,7 +747,7 @@ def server_matches_acl_event(server_name, acl_event):
def _acl_entry_matches(server_name, acl_entry):
if not isinstance(acl_entry, six.string_types):
logger.warning(
logger.warn(
"Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
)
return False
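
server_matches_acl_event above evaluates a server name against m.room.server_acl content: optionally reject IP literals, then check the deny list, then the allow list, with each entry treated as a glob. A condensed sketch of that order of evaluation (the glob-to-regex translation and the crude IP-literal test are assumptions, not the real helpers):

import re


def _acl_glob_matches(server_name, acl_entry):
    if not isinstance(acl_entry, str):
        return False
    # Treat '*' as "any run of characters" and '?' as "any single character".
    pattern = re.escape(acl_entry).replace(r"\*", ".*").replace(r"\?", ".")
    return re.fullmatch(pattern, server_name) is not None


def server_matches_acl(server_name, acl_content):
    if not acl_content.get("allow_ip_literals", True):
        # Crude IP-literal test for illustration: IPv6 literals start with '[',
        # and a dotted-quad collapses to digits once the dots are removed.
        if server_name.startswith("[") or server_name.replace(".", "").isdigit():
            return False
    if any(_acl_glob_matches(server_name, e) for e in acl_content.get("deny", [])):
        return False
    return any(_acl_glob_matches(server_name, e) for e in acl_content.get("allow", []))


assert server_matches_acl("good.example.com", {"allow": ["*.example.com"], "deny": ["evil.*"]})
assert not server_matches_acl("evil.example.com", {"allow": ["*"], "deny": ["evil.*"]})
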
@@ -774,14 +799,15 @@ class FederationHandlerRegistry(object):
self.query_handlers[query_type] = handler
async def on_edu(self, edu_type, origin, content):
@defer.inlineCallbacks
def on_edu(self, edu_type, origin, content):
handler = self.edu_handlers.get(edu_type)
if not handler:
logger.warning("No handler registered for EDU type %s", edu_type)
logger.warn("No handler registered for EDU type %s", edu_type)
with start_active_span_from_edu(content, "handle_edu"):
try:
await handler(origin, content)
yield handler(origin, content)
except SynapseError as e:
logger.info("Failed to handle edu %r: %r", edu_type, e)
except Exception:
@@ -790,7 +816,7 @@ class FederationHandlerRegistry(object):
def on_query(self, query_type, args):
handler = self.query_handlers.get(query_type)
if not handler:
logger.warning("No handler registered for query type %s", query_type)
logger.warn("No handler registered for query type %s", query_type)
raise NotFoundError("No handler for Query type '%s'" % (query_type,))
return handler(args)
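
FederationHandlerRegistry above maps EDU types and query types to registered callables, logging a warning and dropping the EDU when nothing is registered. A minimal registry along the same lines (handler signatures are inferred from the calls visible in the diff; this is a sketch, not the real class):

import logging

logger = logging.getLogger(__name__)


class EduRegistry:
    """Map EDU types to handler coroutines; a minimal sketch of the pattern,
    not the real FederationHandlerRegistry."""

    def __init__(self):
        self._edu_handlers = {}

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self._edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))
        self._edu_handlers[edu_type] = handler

    async def on_edu(self, edu_type, origin, content):
        handler = self._edu_handlers.get(edu_type)
        if not handler:
            logger.warning("No handler registered for EDU type %s", edu_type)
            return
        try:
            await handler(origin, content)
        except Exception:
            logger.exception("Failed to handle EDU of type %s", edu_type)
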
@@ -814,7 +840,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
super(ReplicationFederationHandlerRegistry, self).__init__()
async def on_edu(self, edu_type, origin, content):
def on_edu(self, edu_type, origin, content):
"""Overrides FederationHandlerRegistry
"""
if not self.config.use_presence and edu_type == "m.presence":
@@ -822,17 +848,17 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
handler = self.edu_handlers.get(edu_type)
if handler:
return await super(ReplicationFederationHandlerRegistry, self).on_edu(
return super(ReplicationFederationHandlerRegistry, self).on_edu(
edu_type, origin, content
)
return await self._send_edu(edu_type=edu_type, origin=origin, content=content)
return self._send_edu(edu_type=edu_type, origin=origin, content=content)
async def on_query(self, query_type, args):
def on_query(self, query_type, args):
"""Overrides FederationHandlerRegistry
"""
handler = self.query_handlers.get(query_type)
if handler:
return await handler(args)
return handler(args)
return await self._get_query_client(query_type=query_type, args=args)
return self._get_query_client(query_type=query_type, args=args)

View File

@@ -44,7 +44,7 @@ class TransactionActions(object):
response code and response body.
"""
if not transaction.transaction_id:
raise RuntimeError("Cannot persist a transaction with no transaction_id")
raise RuntimeError("Cannot persist a transaction with no " "transaction_id")
return self.store.get_received_txn_response(transaction.transaction_id, origin)
@@ -56,7 +56,7 @@ class TransactionActions(object):
Deferred
"""
if not transaction.transaction_id:
raise RuntimeError("Cannot persist a transaction with no transaction_id")
raise RuntimeError("Cannot persist a transaction with no " "transaction_id")
return self.store.set_received_txn_response(
transaction.transaction_id, origin, code, response

View File

@@ -36,8 +36,6 @@ from six import iteritems
from sortedcontainers import SortedDict
from twisted.internet import defer
from synapse.metrics import LaterGauge
from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
@@ -214,7 +212,7 @@ class FederationRemoteSendQueue(object):
receipt (synapse.types.ReadReceipt):
"""
# nothing to do here: the replication listener will handle it.
return defer.succeed(None)
pass
def send_presence(self, states):
"""As per FederationSender

View File

@@ -49,7 +49,7 @@ sent_pdus_destination_dist_count = Counter(
sent_pdus_destination_dist_total = Counter(
"synapse_federation_client_sent_pdu_destinations:total",
"Total number of PDUs queued for sending across all destinations",
"" "Total number of PDUs queued for sending across all destinations",
)

View File

@@ -192,16 +192,15 @@ class PerDestinationQueue(object):
# We have to keep 2 free slots for presence and rr_edus
limit = MAX_EDUS_PER_TRANSACTION - 2
device_update_edus, dev_list_id = yield self._get_device_update_edus(
limit
device_update_edus, dev_list_id = (
yield self._get_device_update_edus(limit)
)
limit -= len(device_update_edus)
(
to_device_edus,
device_stream_id,
) = yield self._get_to_device_message_edus(limit)
to_device_edus, device_stream_id = (
yield self._get_to_device_message_edus(limit)
)
pending_edus = device_update_edus + to_device_edus
@@ -360,20 +359,20 @@ class PerDestinationQueue(object):
last_device_list = self._last_device_list_stream_id
# Retrieve list of new device updates to send to the destination
now_stream_id, results = yield self._store.get_device_updates_by_remote(
now_stream_id, results = yield self._store.get_devices_by_remote(
self._destination, last_device_list, limit=limit
)
edus = [
Edu(
origin=self._server_name,
destination=self._destination,
edu_type=edu_type,
edu_type="m.device_list_update",
content=content,
)
for (edu_type, content) in results
for content in results
]
assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"
assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
return (edus, now_stream_id)
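
The hunk above budgets EDUs per outgoing transaction: MAX_EDUS_PER_TRANSACTION minus two slots reserved for presence and read receipts, spent first on device-list updates and then on to-device messages. The arithmetic in isolation (the value 100 matches the limit mentioned earlier in this diff; the function and argument names are placeholders):

MAX_EDUS_PER_TRANSACTION = 100


def budget_edus(num_device_updates_available, num_to_device_available):
    """Return (device_update_count, to_device_count) honouring the per-
    transaction EDU limit, keeping two slots free for presence and
    read-receipt EDUs."""
    limit = MAX_EDUS_PER_TRANSACTION - 2
    device_updates = min(num_device_updates_available, limit)
    limit -= device_updates
    to_device = min(num_to_device_available, limit)
    return device_updates, to_device


assert budget_edus(40, 40) == (40, 40)
assert budget_edus(200, 200) == (98, 0)
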

View File

@@ -84,7 +84,7 @@ class TransactionManager(object):
txn_id = str(self._next_txn_id)
logger.debug(
"TX [%s] {%s} Attempting new transaction (pdus: %d, edus: %d)",
"TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
destination,
txn_id,
len(pdus),
@@ -103,7 +103,7 @@ class TransactionManager(object):
self._next_txn_id += 1
logger.info(
"TX [%s] {%s} Sending transaction [%s], (PDUs: %d, EDUs: %d)",
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
destination,
txn_id,
transaction.transaction_id,
@@ -146,7 +146,7 @@ class TransactionManager(object):
if code == 200:
for e_id, r in response.get("pdus", {}).items():
if "error" in r:
logger.warning(
logger.warn(
"TX [%s] {%s} Remote returned error for %s: %s",
destination,
txn_id,
@@ -155,7 +155,7 @@ class TransactionManager(object):
)
else:
for p in pdus:
logger.warning(
logger.warn(
"TX [%s] {%s} Failed to send event %s",
destination,
txn_id,

View File

@@ -14,9 +14,9 @@
# limitations under the License.
"""The transport layer is responsible for both sending transactions to remote
homeservers and receiving a variety of requests from other homeservers.
home servers and receiving a variety of requests from other home servers.
By default this is done over HTTPS (and all homeservers are required to
By default this is done over HTTPS (and all home servers are required to
support HTTPS), however individual pairings of servers may decide to
communicate over a different (albeit still reliable) protocol.
"""

View File

@@ -38,13 +38,37 @@ class TransportLayerClient(object):
self.server_name = hs.hostname
self.client = hs.get_http_client()
@log_function
def get_room_state(self, destination, room_id, event_id):
""" Requests all state for a given room from the given server at the
given event.
Args:
destination (str): The host name of the remote home server we want
to get the state from.
context (str): The name of the context we want the state of
event_id (str): The event we want the context at.
Returns:
Deferred: Results in a dict received from the remote homeserver.
"""
logger.debug("get_room_state dest=%s, room=%s", destination, room_id)
path = _create_v1_path("/state/%s", room_id)
return self.client.get_json(
destination,
path=path,
args={"event_id": event_id},
try_trailing_slash_on_400=True,
)
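
get_room_state above builds its URL with _create_v1_path("/state/%s", room_id) and passes event_id as a query argument. A sketch of what such a path helper plausibly does, namely prefixing the v1 federation API root and percent-encoding each argument (the exact encoding rules of the real helper are an assumption):

from urllib.parse import quote

FEDERATION_V1_PREFIX = "/_matrix/federation/v1"


def create_v1_path(path_template, *args):
    # Percent-encode every argument (including '/') before substitution so
    # room and event IDs cannot change the path structure.
    return FEDERATION_V1_PREFIX + path_template % tuple(quote(str(a), safe="") for a in args)


assert create_v1_path("/state/%s", "!room:example.org") == (
    "/_matrix/federation/v1/state/%21room%3Aexample.org"
)
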
@log_function
def get_room_state_ids(self, destination, room_id, event_id):
""" Requests all state for a given room from the given server at the
given event. Returns the state's event_id's
Args:
destination (str): The host name of the remote homeserver we want
destination (str): The host name of the remote home server we want
to get the state from.
context (str): The name of the context we want the state of
event_id (str): The event we want the context at.
@@ -67,7 +91,7 @@ class TransportLayerClient(object):
""" Requests the pdu with give id and origin from the given server.
Args:
destination (str): The host name of the remote homeserver we want
destination (str): The host name of the remote home server we want
to get the state from.
event_id (str): The id of the event being requested.
timeout (int): How long to try (in ms) the destination for before
@@ -98,10 +122,10 @@ class TransportLayerClient(object):
Deferred: Results in a dict received from the remote homeserver.
"""
logger.debug(
"backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
"backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
destination,
room_id,
event_tuples,
repr(event_tuples),
str(limit),
)
@@ -243,7 +267,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def send_join_v1(self, destination, room_id, event_id, content):
def send_join(self, destination, room_id, event_id, content):
path = _create_v1_path("/send_join/%s/%s", room_id, event_id)
response = yield self.client.put_json(
@@ -254,18 +278,7 @@ class TransportLayerClient(object):
@defer.inlineCallbacks
@log_function
def send_join_v2(self, destination, room_id, event_id, content):
path = _create_v2_path("/send_join/%s/%s", room_id, event_id)
response = yield self.client.put_json(
destination=destination, path=path, data=content
)
return response
@defer.inlineCallbacks
@log_function
def send_leave_v1(self, destination, room_id, event_id, content):
def send_leave(self, destination, room_id, event_id, content):
path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)
response = yield self.client.put_json(
@@ -281,24 +294,6 @@ class TransportLayerClient(object):
return response
@defer.inlineCallbacks
@log_function
def send_leave_v2(self, destination, room_id, event_id, content):
path = _create_v2_path("/send_leave/%s/%s", room_id, event_id)
response = yield self.client.put_json(
destination=destination,
path=path,
data=content,
# we want to do our best to send this through. The problem is
# that if it fails, we won't retry it later, so if the remote
# server was just having a momentary blip, the room will be out of
# sync.
ignore_backoff=True,
)
return response
@defer.inlineCallbacks
@log_function
def send_invite_v1(self, destination, room_id, event_id, content):

Some files were not shown because too many files have changed in this diff.