Compare commits
475 Commits
| SHA1 | Author | Date |
|---|---|---|
| 6543296467 | |||
| 4ce05ec171 | |||
| caa52836e4 | |||
| 7e67cfc87d | |||
| cb2db17994 | |||
| 5bfd8855d6 | |||
| c965253e4b | |||
| 324d4f61b8 | |||
| 25f1244329 | |||
| b8e4b39b69 | |||
| cfcfb57e58 | |||
| 6828b47c45 | |||
| 2045356517 | |||
| 894d2addac | |||
| 31905a518e | |||
| 5324bc20a6 | |||
| 6637d90d77 | |||
| 4db394a4b3 | |||
| e77237b935 | |||
| 7712e751b8 | |||
| 7c429f92d6 | |||
| adb3a873fd | |||
| d156912c4c | |||
| fc316a4894 | |||
| 6676ee9c4a | |||
| ea0f0ad414 | |||
| 72acca6a32 | |||
| 54ae52ba96 | |||
| d21577bdcb | |||
| 239d86a134 | |||
| f8bc2ae883 | |||
| 4947de5a14 | |||
| b2dcddc413 | |||
| 40eda84933 | |||
| c3dda2874d | |||
| 424fd58237 | |||
| 3f97b4c16b | |||
| 257ef2c727 | |||
| d630c82349 | |||
| 67c991b78f | |||
| bc5cb8bfe8 | |||
| 35f3c366ef | |||
| ae49d29ef1 | |||
| e726e18737 | |||
| a964f18887 | |||
| 4643bb2a37 | |||
| 28b758fa0f | |||
| accd343f91 | |||
| 663238aeb4 | |||
| ffeafade48 | |||
| 451ec9b8b9 | |||
| 3bd049bbb7 | |||
| e3f528c544 | |||
| 332f3b36e5 | |||
| 52346990c8 | |||
| 31da85e467 | |||
| ec5fdd1333 | |||
| 2ac78438d8 | |||
| cc5f6eb608 | |||
| b1e7012dee | |||
| f5bb1531b7 | |||
| 9a2223d4c8 | |||
| 353396e3a7 | |||
| d95736a2bd | |||
| 52fe9788bc | |||
| 4cade96616 | |||
| 0f3614f0f6 | |||
| 21aa0a458f | |||
| 5e8abe9013 | |||
| 24da1ffcb6 | |||
| 96d35f1028 | |||
| adfdd82b21 | |||
| 30e9adf32f | |||
| aeaeb72ee4 | |||
| a1f8ea9051 | |||
| f166a8d1f5 | |||
| e1544b0af8 | |||
| 65b37f6729 | |||
| 18660a34d8 | |||
| 4a161a29ac | |||
| 8ad8bcbed0 | |||
| e519489fc4 | |||
| a9b393340f | |||
| 71ee22c0ba | |||
| 5e35f69ac3 | |||
| 852f80d8a6 | |||
| 75f87450d8 | |||
| d537be1ebd | |||
| d64bb32a73 | |||
| 9a4fb457cf | |||
| f3ea2f5a08 | |||
| 2ace775d88 | |||
| e216ec381a | |||
| b3a4e35ca8 | |||
| e126d83f74 | |||
| 649b6bc088 | |||
| b2ee65ea8c | |||
| 8437e2383e | |||
| d085a8a0a5 | |||
| edb8b6af9a | |||
| 9c41ba4c5f | |||
| af5d0ebc72 | |||
| 410bfd035a | |||
| 4ca3ef10b9 | |||
| 1a0997bbd5 | |||
| 09623446d4 | |||
| ff119879d6 | |||
| dc8747895e | |||
| 63d6ad1064 | |||
| e1f4c83f41 | |||
| ba7af15d4e | |||
| 8b77fc6506 | |||
| 6dcd6c40a0 | |||
| 4a33a6dd19 | |||
| 8863624f78 | |||
| 756d4942f5 | |||
| ddbbfc9512 | |||
| fe799f353d | |||
| f8421a1404 | |||
| 02c1f36ccd | |||
| 6cd11109db | |||
| e203874caa | |||
| 685fae1ba5 | |||
| ee86abb2d6 | |||
| c2f525a525 | |||
| 3eb15c01d9 | |||
| 9186c105a0 | |||
| a7f20500ff | |||
| b9449012db | |||
| c530f9af4d | |||
| 00f0d67566 | |||
| a785a2febe | |||
| 1056d6885a | |||
| 6b2867096b | |||
| ddd48b6851 | |||
| 65c6aee621 | |||
| aeda1b3b94 | |||
| 9dc84b7989 | |||
| 08a436ecb2 | |||
| c1ae453932 | |||
| 85901939c1 | |||
| 768b84409b | |||
| 2aa8943809 | |||
| cb0aeb147e | |||
| 0120875462 | |||
| b62c9db8d7 | |||
| ce1c975ebc | |||
| 418813b205 | |||
| 54dd5dc12b | |||
| 620f98b65b | |||
| fdec84aa42 | |||
| e57567f994 | |||
| 2252680a98 | |||
| 8ee62e4b98 | |||
| 57f09e01f5 | |||
| 72078e4be5 | |||
| 0ad75fd98e | |||
| 81731c6e75 | |||
| 23ea572125 | |||
| 1c3a61529f | |||
| 6d8576c4ce | |||
| 78ec11c085 | |||
| 5ee2beeddb | |||
| 708cef88cf | |||
| 7baeea9f37 | |||
| 19ba7c142e | |||
| 96562131a4 | |||
| 8c9a713f8d | |||
| 2173785f0d | |||
| e7777f3668 | |||
| 2030193e55 | |||
| 69d8fb83c6 | |||
| a9c44d4008 | |||
| c48ea98007 | |||
| 0f87b912ab | |||
| 0d27aba900 | |||
| 6f4a63df00 | |||
| d31f69afa0 | |||
| 9b9ee75666 | |||
| 70c0da4d82 | |||
| ce578031f4 | |||
| 651d930f16 | |||
| ef1a85e773 | |||
| 9e937c28ee | |||
| f0ef970824 | |||
| f085894cd1 | |||
| f8f14ba466 | |||
| ba110a2030 | |||
| 8bb7b15894 | |||
| 9fb350af65 | |||
| a8175d0f96 | |||
| b98971e8a4 | |||
| 65d54c5e8c | |||
| f9f1c8acbb | |||
| 4d394d6415 | |||
| 35f9165e96 | |||
| 4c1b799e1b | |||
| c01d543584 | |||
| 07929bd62f | |||
| 9eebd46048 | |||
| f9c9e1f076 | |||
| b7367c339d | |||
| 78cfc05fc4 | |||
| 265c0bd2fe | |||
| 24cc31ee96 | |||
| 3916e1b97a | |||
| 9cc168e42e | |||
| b2f8c21a9b | |||
| 49243c55a4 | |||
| e2a20326e8 | |||
| 486be06f48 | |||
| 41e4566682 | |||
| 234f55f3c4 | |||
| 6356f2088f | |||
| 4f5ca455bf | |||
| 83446a18fb | |||
| 271c322d08 | |||
| cdd3cb870d | |||
| a6fc6754f8 | |||
| 97b863fe32 | |||
| bf9a11c54d | |||
| 7c24d0f443 | |||
| c7376cdfe3 | |||
| 870c00e278 | |||
| 657d614f6a | |||
| 53b6559a89 | |||
| 745a48625d | |||
| 6e1b40dc26 | |||
| a65a5ea125 | |||
| 473acedcdd | |||
| a42567e4a8 | |||
| c8444b4152 | |||
| c350bc2f92 | |||
| e1648dc576 | |||
| 85f172ef96 | |||
| 73d091be48 | |||
| bc29a19731 | |||
| 94cdd6fffe | |||
| 21056ad12a | |||
| edc4c7d4c5 | |||
| 5e18dc7955 | |||
| 74897de01f | |||
| 1e202a90f1 | |||
| 92527d7b21 | |||
| 4c131b2c78 | |||
| 20d687516f | |||
| cd96b4586f | |||
| 318dd21b47 | |||
| c4bdf2d785 | |||
| f41027f746 | |||
| f8407975e7 | |||
| 772d414975 | |||
| 963ffb60b9 | |||
| b16fa43386 | |||
| f713c01e2b | |||
| e4ec82ce0f | |||
| 46e5db9eb2 | |||
| c5abb67e43 | |||
| dad8d68c99 | |||
| 6d360f099f | |||
| c9b27d0044 | |||
| cd31201267 | |||
| 1186612d6c | |||
| ec2cb9f298 | |||
| bb78276bdc | |||
| b9cba07962 | |||
| 70804392ae | |||
| 15a1a02e70 | |||
| 4f519d556e | |||
| 3f9b61ff95 | |||
| e914cf12f6 | |||
| b03cddaeb9 | |||
| affcc2cc36 | |||
| f03c9d3444 | |||
| eda14737cf | |||
| a6ebef1bfd | |||
| 5c3363233c | |||
| 71f3bd734f | |||
| 55bc8d531e | |||
| 1fe3cc2c9c | |||
| 915903eada | |||
| 08b2868ffe | |||
| d2f6a67cb4 | |||
| 4059d61e26 | |||
| b33c4f7a82 | |||
| 4fc53bf1fb | |||
| f697b4b4a2 | |||
| 541f1b92d9 | |||
| 24a214bd1b | |||
| 70d93cafdb | |||
| 807ec3bd99 | |||
| 0e3ab8afdc | |||
| 01ba7b38a7 | |||
| b437eb48b6 | |||
| 052513958d | |||
| 5570d1c93f | |||
| 02f99906f2 | |||
| 55a7da247a | |||
| 248111bae8 | |||
| c16e192e2f | |||
| cb2cbe4d26 | |||
| f141af4c79 | |||
| a6863da249 | |||
| 8822b33111 | |||
| f5d8fdf0a7 | |||
| d8d808db64 | |||
| 037360e6cf | |||
| c9e4748cb7 | |||
| 4086002827 | |||
| a7c818c79b | |||
| 1dffa78701 | |||
| ffe595381d | |||
| 506a63de67 | |||
| c2203bea57 | |||
| e252ffadbc | |||
| 0287d033ee | |||
| 4e1c7b79fa | |||
| 09957ce0e4 | |||
| 7134ca7daa | |||
| 6a0092d371 | |||
| cc6243b4c0 | |||
| 3b29a73f9f | |||
| 824bba2f78 | |||
| 49008e674f | |||
| 1586f2c7e7 | |||
| 1c1268245d | |||
| 416c7baee6 | |||
| 911b03ca31 | |||
| 07cb38e965 | |||
| a46574281d | |||
| c9a1b80a74 | |||
| f496d25877 | |||
| 988d8d6507 | |||
| c6516adbe0 | |||
| 5598445655 | |||
| fa7e52caf1 | |||
| 67a65918ad | |||
| 1cb84c6486 | |||
| fe1f2b4520 | |||
| a2c63c619a | |||
| 669b6cbda3 | |||
| befd58f47b | |||
| e3689ac6f7 | |||
| 57cdb046e4 | |||
| c6dbca2422 | |||
| ace947e8da | |||
| 53d7680e32 | |||
| 1f156398b9 | |||
| c61db13183 | |||
| c3fc176c60 | |||
| 6f4bc6d01d | |||
| 3b4216f961 | |||
| 42e707c663 | |||
| 9c94b48bf1 | |||
| f7e4a582ef | |||
| fb1a6914cf | |||
| 020add5099 | |||
| 61be1a2926 | |||
| f91f2a1f92 | |||
| 8f5bbdb987 | |||
| cd581338cf | |||
| dfe0cd71b6 | |||
| 3a74c03ffb | |||
| 69489f8eb1 | |||
| 64f2b8c3d8 | |||
| b2ff8c305f | |||
| 97c60ccaa3 | |||
| c6bcd38841 | |||
| eb9a0d9e48 | |||
| 54fef094b3 | |||
| 998f7fe7d4 | |||
| 670972c0e1 | |||
| bb6cec27a5 | |||
| 0467f33584 | |||
| dcc069a2e2 | |||
| 62588eae4a | |||
| d8c9109aee | |||
| fe51d6cacf | |||
| 395683add1 | |||
| e7943f660a | |||
| 233b14ebe1 | |||
| acd16ad86a | |||
| ecfba89a78 | |||
| 7c8c97e635 | |||
| d3f694d628 | |||
| 69f0054ce6 | |||
| 5db03535d5 | |||
| fa0dcbc8fa | |||
| 7d7eac61be | |||
| bc32f102cd | |||
| d78b1e339d | |||
| b7fe62b766 | |||
| ec6de1cc7d | |||
| a8d16f6c00 | |||
| e5c3a99091 | |||
| 9677613e9c | |||
| 6e677403b7 | |||
| 7e17959984 | |||
| 1de28183cb | |||
| 326b3dace7 | |||
| a2276d4d3c | |||
| 2cab02f9d1 | |||
| 7955abeaac | |||
| 46c12918ad | |||
| 9178ac1b6a | |||
| b39ca49db1 | |||
| 770d1ef673 | |||
| ba4cc5541c | |||
| 72bc6294ed | |||
| b4465564cc | |||
| 213d7eb227 | |||
| 47f767269c | |||
| a287f1e804 | |||
| 38474707b9 | |||
| 74c1e16106 | |||
| 307e313ef4 | |||
| d6e40e7cbd | |||
| d79151921a | |||
| 7dd7a385f9 | |||
| 2c35ffead2 | |||
| 09a135b039 | |||
| 65cb307e19 | |||
| fec7d88645 | |||
| 3f33879be4 | |||
| cc80968f62 | |||
| 387324688e | |||
| 9be41bc121 | |||
| 1a7ed37149 | |||
| e577a4b2ad | |||
| 561133c3c5 | |||
| e6c7e239ef | |||
| e419c44ba4 | |||
| 14504ad573 | |||
| 29207b4488 | |||
| a8aced58df | |||
| d0d8a22c13 | |||
| bcfc647e4d | |||
| 9aee28927b | |||
| da78f61778 | |||
| 4697c0de0b | |||
| 4cf3a30a20 | |||
| 64c2cfda8a | |||
| a71b8c87ec | |||
| 44ab048cfe | |||
| 2020f11916 | |||
| 0417ca1a64 | |||
| c40d7244f8 | |||
| 8ac766c44a | |||
| ff05c9b760 | |||
| 29a0bc5637 | |||
| 608947eedf | |||
| 848cd388d9 | |||
| e4d98188da | |||
| 47c02f82e3 | |||
| 0d7e9523e5 | |||
| 8f4a808d9d | |||
| 9eebc1e73b | |||
| f85b9842f0 | |||
| c3cd977fff | |||
| 39266a9c9f | |||
| 9fb96889a4 | |||
| 3ca4c7c516 | |||
| 73cf63784b | |||
| dc2cd6f79d | |||
| 22a9847670 | |||
| 480eac30eb | |||
| 404e8c8532 | |||
| 3e3f9b684e | |||
| 0563839535 | |||
| 1fabf82d50 | |||
| 41ad35b523 | |||
| cfdb84422d | |||
| a1aaf3eea6 | |||
| 8d3542a64e | |||
| 82c8799ec7 |
@@ -1,6 +1,6 @@
#!/usr/bin/env bash

set -ex
set -e

if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
    echo "Not merging forward, as this is a release branch"
@@ -18,6 +18,8 @@ else
    GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
fi

echo "--- merge_base_branch $GITBASE"

# Show what we are before
git --no-pager show -s
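The branch gate above is a plain bash regex match, so it can be exercised on its own; a minimal sketch with made-up branch names (the pattern is the one from the script):

```
# Hypothetical branch names; release branches short-circuit the merge.
for b in develop master release-v1.7.0 some-feature; do
    if [[ "$b" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
        echo "$b: release branch, not merging forward"
    else
        echo "$b: would merge forward"
    fi
done
```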
@@ -0,0 +1,21 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the script to connect to the postgresql database that will be available in the
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"

signing_key_path: "/src/.buildkite/test.signing.key"

report_stats: false

database:
  name: "psycopg2"
  args:
    user: postgres
    host: postgres
    password: postgres
    database: synapse

# Suppress the key server warning.
trusted_key_servers:
  - server_name: "matrix.org"
    suppress_key_server_warning: true
@@ -0,0 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from synapse.storage.engines import create_engine

logger = logging.getLogger("create_postgres_db")

if __name__ == "__main__":
    # Create a PostgresEngine.
    db_engine = create_engine({"name": "psycopg2", "args": {}})

    # Connect to postgres to create the base database.
    # We use "postgres" as a database because it's bound to exist and the "synapse" one
    # doesn't exist yet.
    db_conn = db_engine.module.connect(
        user="postgres", host="postgres", password="postgres", dbname="postgres"
    )
    db_conn.autocommit = True
    cur = db_conn.cursor()
    cur.execute("CREATE DATABASE synapse;")
    cur.close()
    db_conn.close()
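If you want to sanity-check the result by hand (a hypothetical step, not part of the CI flow), the same credentials work from `psql`:

```
# List databases on the CI's postgres host and confirm "synapse" exists.
PGPASSWORD=postgres psql -h postgres -U postgres -l | grep synapse
```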
@@ -0,0 +1,36 @@
#!/bin/bash
#
# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
# with additional dependencies needed for the test (such as coverage or the PostgreSQL
# driver), updates the schema of the test SQLite database, runs background updates on it,
# creates an empty test database in PostgreSQL, and then runs the 'synapse_port_db' script
# to test porting the SQLite database to the PostgreSQL database (with coverage).

set -xe
cd `dirname $0`/../..

echo "--- Install dependencies"

# Install dependencies for this test.
pip install psycopg2 coverage coverage-enable-subprocess

# Install Synapse itself. This won't update any libraries.
pip install -e .

echo "--- Generate the signing key"

# Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml

echo "--- Prepare the databases"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml

# Create the PostgreSQL database.
./.buildkite/scripts/create_postgres_db.py

echo "+++ Run synapse_port_db"

# Run the script
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
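Because the port runs under `coverage` with subprocess support installed, the data files it writes can be combined and summarised afterwards; a hypothetical follow-up, not part of the script above:

```
coverage combine   # merge the data files written by subprocesses
coverage report    # print a coverage summary to the terminal
```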
@@ -0,0 +1,18 @@
# Configuration file used for testing the 'synapse_port_db' script.
# Tells the 'update_database' script to connect to the test SQLite database to upgrade its
# schema and run background updates on it.
server_name: "localhost:8800"

signing_key_path: "/src/.buildkite/test.signing.key"

report_stats: false

database:
  name: "sqlite3"
  args:
    database: ".buildkite/test_db.db"

# Suppress the key server warning.
trusted_key_servers:
  - server_name: "matrix.org"
    suppress_key_server_warning: true
Binary file not shown.
@@ -28,3 +28,14 @@ User sees updates to presence from other users in the incremental sync.
Gapped incremental syncs include all state changes

Old members are included in gappy incr LL sync if they start speaking

# new failures as of https://github.com/matrix-org/sytest/pull/732
Device list doesn't change if remote server is down
Remote servers cannot set power levels in rooms without existing powerlevels
Remote servers should reject attempts by non-creators to set the power levels

# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
Server correctly handles incoming m.device_list_update

# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
@@ -1,7 +1,8 @@
### Pull Request Checklist

<!-- Please read CONTRIBUTING.rst before submitting your pull request -->
<!-- Please read CONTRIBUTING.md before submitting your pull request -->

* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
@@ -1,3 +1,193 @@
Synapse 1.7.0rc2 (2019-12-11)
=============================

Bugfixes
--------

- Fix incorrect error message for invalid requests when setting user's avatar URL. ([\#6497](https://github.com/matrix-org/synapse/issues/6497))
- Fix support for SQLite 3.7. ([\#6499](https://github.com/matrix-org/synapse/issues/6499))
- Fix regression where sending email push would not work when using a pusher worker. ([\#6507](https://github.com/matrix-org/synapse/issues/6507), [\#6509](https://github.com/matrix-org/synapse/issues/6509))


Synapse 1.7.0rc1 (2019-12-09)
=============================

Features
--------

- Implement per-room message retention policies. ([\#5815](https://github.com/matrix-org/synapse/issues/5815), [\#6436](https://github.com/matrix-org/synapse/issues/6436))
- Add etag and count fields to key backup endpoints to help clients guess if there are new keys. ([\#5858](https://github.com/matrix-org/synapse/issues/5858))
- Add `/admin/v2/users` endpoint with pagination. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925))
- Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. ([\#6119](https://github.com/matrix-org/synapse/issues/6119))
- Implement the `/_matrix/federation/unstable/net.atleastfornow/state/<context>` API as drafted in MSC2314. ([\#6176](https://github.com/matrix-org/synapse/issues/6176))
- Configure privacy-preserving settings by default for the room directory. ([\#6355](https://github.com/matrix-org/synapse/issues/6355))
- Add ephemeral messages support by partially implementing [MSC2228](https://github.com/matrix-org/matrix-doc/pull/2228). ([\#6409](https://github.com/matrix-org/synapse/issues/6409))
- Add support for [MSC 2367](https://github.com/matrix-org/matrix-doc/pull/2367), which allows specifying a reason on all membership events. ([\#6434](https://github.com/matrix-org/synapse/issues/6434))


Bugfixes
--------

- Transfer non-standard power levels on room upgrade. ([\#6237](https://github.com/matrix-org/synapse/issues/6237))
- Fix error from the Pillow library when uploading RGBA images. ([\#6241](https://github.com/matrix-org/synapse/issues/6241))
- Correctly apply the event filter to the `state`, `events_before` and `events_after` fields in the response to `/context` requests. ([\#6329](https://github.com/matrix-org/synapse/issues/6329))
- Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices. ([\#6332](https://github.com/matrix-org/synapse/issues/6332))
- Prevent account data syncs getting lost across TCP replication. ([\#6333](https://github.com/matrix-org/synapse/issues/6333))
- Fix bug: TypeError in `register_user()` while using LDAP auth module. ([\#6406](https://github.com/matrix-org/synapse/issues/6406))
- Fix an intermittent exception when handling read-receipts. ([\#6408](https://github.com/matrix-org/synapse/issues/6408))
- Fix broken guest registration when there are existing blocks of numeric user IDs. ([\#6420](https://github.com/matrix-org/synapse/issues/6420))
- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))
- Fix error when using synapse_port_db on a vanilla synapse db. ([\#6449](https://github.com/matrix-org/synapse/issues/6449))
- Fix uploading multiple cross signing signatures for the same user. ([\#6451](https://github.com/matrix-org/synapse/issues/6451))
- Fix bug which led to exceptions being thrown in a loop when a cross-signed device is deleted. ([\#6462](https://github.com/matrix-org/synapse/issues/6462))
- Fix `synapse_port_db` not exiting with a 0 code if something went wrong during the port process. ([\#6470](https://github.com/matrix-org/synapse/issues/6470))
- Improve sanity-checking when receiving events over federation. ([\#6472](https://github.com/matrix-org/synapse/issues/6472))
- Fix inaccurate per-block Prometheus metrics. ([\#6491](https://github.com/matrix-org/synapse/issues/6491))
- Fix small performance regression for sending invites. ([\#6493](https://github.com/matrix-org/synapse/issues/6493))
- Back out cross-signing code added in Synapse 1.5.0, which caused a performance regression. ([\#6494](https://github.com/matrix-org/synapse/issues/6494))


Improved Documentation
----------------------

- Update documentation and variables in user contributed systemd reference file. ([\#6369](https://github.com/matrix-org/synapse/issues/6369), [\#6490](https://github.com/matrix-org/synapse/issues/6490))
- Fix link in the user directory documentation. ([\#6388](https://github.com/matrix-org/synapse/issues/6388))
- Add build instructions to the docker readme. ([\#6390](https://github.com/matrix-org/synapse/issues/6390))
- Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md. ([\#6443](https://github.com/matrix-org/synapse/issues/6443))
- Write some docs for the quarantine_media api. ([\#6458](https://github.com/matrix-org/synapse/issues/6458))
- Convert CONTRIBUTING.rst to markdown (among other small fixes). ([\#6461](https://github.com/matrix-org/synapse/issues/6461))


Deprecations and Removals
-------------------------

- Remove admin/v1/users_paginate endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5925](https://github.com/matrix-org/synapse/issues/5925))
- Remove fallback for federation with old servers which lack the /federation/v1/state_ids API. ([\#6488](https://github.com/matrix-org/synapse/issues/6488))


Internal Changes
----------------

- Add benchmarks for structured logging and improve output performance. ([\#6266](https://github.com/matrix-org/synapse/issues/6266))
- Improve the performance of outputting structured logging. ([\#6322](https://github.com/matrix-org/synapse/issues/6322))
- Refactor some code in the event authentication path for clarity. ([\#6343](https://github.com/matrix-org/synapse/issues/6343), [\#6468](https://github.com/matrix-org/synapse/issues/6468), [\#6480](https://github.com/matrix-org/synapse/issues/6480))
- Clean up some unnecessary quotation marks around the codebase. ([\#6362](https://github.com/matrix-org/synapse/issues/6362))
- Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary. ([\#6379](https://github.com/matrix-org/synapse/issues/6379))
- Add a test scenario to make sure room history purges don't break `/messages` in the future. ([\#6392](https://github.com/matrix-org/synapse/issues/6392))
- Clarifications for the email configuration settings. ([\#6423](https://github.com/matrix-org/synapse/issues/6423))
- Add more tests to the blacklist when running in worker mode. ([\#6429](https://github.com/matrix-org/synapse/issues/6429))
- Refactor data store layer to support multiple databases in the future. ([\#6454](https://github.com/matrix-org/synapse/issues/6454), [\#6464](https://github.com/matrix-org/synapse/issues/6464), [\#6469](https://github.com/matrix-org/synapse/issues/6469), [\#6487](https://github.com/matrix-org/synapse/issues/6487))
- Port synapse.rest.client.v1 to async/await. ([\#6482](https://github.com/matrix-org/synapse/issues/6482))
- Port synapse.rest.client.v2_alpha to async/await. ([\#6483](https://github.com/matrix-org/synapse/issues/6483))
- Port SyncHandler to async/await. ([\#6484](https://github.com/matrix-org/synapse/issues/6484))


Synapse 1.6.1 (2019-11-28)
==========================

Security updates
----------------

This release includes a security fix ([\#6426](https://github.com/matrix-org/synapse/issues/6426), below). Administrators are encouraged to upgrade as soon as possible.

Bugfixes
--------

- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426))
- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))


Synapse 1.6.0 (2019-11-26)
==========================

Bugfixes
--------

- Fix phone home stats reporting. ([\#6418](https://github.com/matrix-org/synapse/issues/6418))


Synapse 1.6.0rc2 (2019-11-25)
=============================

Bugfixes
--------

- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))


Synapse 1.6.0rc1 (2019-11-20)
=============================

Features
--------

- Add federation support for cross-signing. ([\#5727](https://github.com/matrix-org/synapse/issues/5727))
- Increase default room version from 4 to 5, thereby enforcing server key validity period checks. ([\#6220](https://github.com/matrix-org/synapse/issues/6220))
- Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars. ([\#6238](https://github.com/matrix-org/synapse/issues/6238))
- Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). ([\#6301](https://github.com/matrix-org/synapse/issues/6301), [\#6310](https://github.com/matrix-org/synapse/issues/6310), [\#6340](https://github.com/matrix-org/synapse/issues/6340))


Bugfixes
--------

- Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460. ([\#6213](https://github.com/matrix-org/synapse/issues/6213))
- Remove a room from a server's public rooms list on room upgrade. ([\#6232](https://github.com/matrix-org/synapse/issues/6232), [\#6235](https://github.com/matrix-org/synapse/issues/6235))
- Delete keys from key backup when deleting backup versions. ([\#6253](https://github.com/matrix-org/synapse/issues/6253))
- Make notification of cross-signing signatures work with workers. ([\#6254](https://github.com/matrix-org/synapse/issues/6254))
- Fix exception when remote servers attempt to join a room that they're not allowed to join. ([\#6278](https://github.com/matrix-org/synapse/issues/6278))
- Prevent errors from appearing on Synapse startup if `git` is not installed. ([\#6284](https://github.com/matrix-org/synapse/issues/6284))
- Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306))
- Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
- Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
- Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
- Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
- Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
- Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
- Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause unable to decrypt errors. ([\#6363](https://github.com/matrix-org/synapse/issues/6363))
- Fix permission denied error when trying to generate a config file with the docker image. ([\#6389](https://github.com/matrix-org/synapse/issues/6389))


Improved Documentation
----------------------

- Contributor documentation now mentions script to run linters. ([\#6164](https://github.com/matrix-org/synapse/issues/6164))
- Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate. ([\#6257](https://github.com/matrix-org/synapse/issues/6257))
- Update `INSTALL.md` Email section to talk about `account_threepid_delegates`. ([\#6272](https://github.com/matrix-org/synapse/issues/6272))
- Fix a small typo in `account_threepid_delegates` configuration option. ([\#6273](https://github.com/matrix-org/synapse/issues/6273))


Internal Changes
----------------

- Add a CI job to test the `synapse_port_db` script. ([\#6140](https://github.com/matrix-org/synapse/issues/6140), [\#6276](https://github.com/matrix-org/synapse/issues/6276))
- Convert EventContext to an attrs. ([\#6218](https://github.com/matrix-org/synapse/issues/6218))
- Move `persist_events` out from main data store. ([\#6240](https://github.com/matrix-org/synapse/issues/6240), [\#6300](https://github.com/matrix-org/synapse/issues/6300))
- Reduce verbosity of user/room stats. ([\#6250](https://github.com/matrix-org/synapse/issues/6250))
- Reduce impact of debug logging. ([\#6251](https://github.com/matrix-org/synapse/issues/6251))
- Expose some homeserver functionality to spam checkers. ([\#6259](https://github.com/matrix-org/synapse/issues/6259))
- Change cache descriptors to always return deferreds. ([\#6263](https://github.com/matrix-org/synapse/issues/6263), [\#6291](https://github.com/matrix-org/synapse/issues/6291))
- Fix incorrect comment regarding the functionality of an `if` statement. ([\#6269](https://github.com/matrix-org/synapse/issues/6269))
- Update CI to run `isort` over the `scripts` and `scripts-dev` directories. ([\#6270](https://github.com/matrix-org/synapse/issues/6270))
- Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. ([\#6271](https://github.com/matrix-org/synapse/issues/6271), [\#6314](https://github.com/matrix-org/synapse/issues/6314))
- Port replication http server endpoints to async/await. ([\#6274](https://github.com/matrix-org/synapse/issues/6274))
- Port room rest handlers to async/await. ([\#6275](https://github.com/matrix-org/synapse/issues/6275))
- Remove redundant CLI parameters on CI's `flake8` step. ([\#6277](https://github.com/matrix-org/synapse/issues/6277))
- Port `federation_server.py` to async/await. ([\#6279](https://github.com/matrix-org/synapse/issues/6279))
- Port receipt and read markers to async/await. ([\#6280](https://github.com/matrix-org/synapse/issues/6280))
- Split out state storage into separate data store. ([\#6294](https://github.com/matrix-org/synapse/issues/6294), [\#6295](https://github.com/matrix-org/synapse/issues/6295))
- Refactor EventContext for clarity. ([\#6298](https://github.com/matrix-org/synapse/issues/6298))
- Update the version of black used to 19.10b0. ([\#6304](https://github.com/matrix-org/synapse/issues/6304))
- Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
- Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
- Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
- Add optional python dependencies and dependent binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
- Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
- Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
- Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
- Add continuous integration for python 3.8. ([\#6341](https://github.com/matrix-org/synapse/issues/6341))
- Correct spacing/case of various instances of the word "homeserver". ([\#6357](https://github.com/matrix-org/synapse/issues/6357))
- Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. ([\#6361](https://github.com/matrix-org/synapse/issues/6361))


Synapse 1.5.1 (2019-11-06)
==========================
@@ -1,5 +1,4 @@
Contributing code to Matrix
===========================
# Contributing code to Matrix

Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
@@ -7,18 +6,17 @@ their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).
case, this is almost always Apache Software License v2 (see [LICENSE](LICENSE)).

How to contribute
~~~~~~~~~~~~~~~~~
## How to contribute

The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)
relevant project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull
your changes into our repo.

**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**
the develop branch - *not* master.**

We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
@@ -30,9 +28,8 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
continuous integration. Buildkite builds need to be authorised by a
maintainer. If your change breaks the build, this will be shown in GitHub, so
We use [Buildkite](https://buildkite.com/matrix-dot-org/synapse) for continuous
integration. If your change breaks the build, this will be shown in GitHub, so
please keep an eye on the pull request for feedback.

To run unit tests in a local development environment, you can use:
@@ -47,41 +44,63 @@ To run unit tests in a local development environment, you can use:
set up PostgreSQL yourself.

Docker images are available for running the integration tests (SyTest) locally,
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
information.

Code style
~~~~~~~~~~
## Code style

All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.md.
[here](docs/code_style.md).

To facilitate meeting these criteria you can run `scripts-dev/lint.sh`
locally. Since this runs the tools listed in the above document, you'll need
python 3.6 and to install each tool:

```
# Install the dependencies
pip install -U black flake8 isort

# Run the linter script
./scripts-dev/lint.sh
```

**Note that the script does not just test/check, but also reformats code, so you
may wish to ensure any new code is committed first**. By default this script
checks all files and can take some time; if you alter only certain files, you
might wish to specify paths as arguments to reduce the run-time:

```
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```

Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.

Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.

Changelog
~~~~~~~~~

## Changelog

All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).

To create a changelog entry, make a new file in the ``changelog.d`` file named
in the format of ``PRnumber.type``. The type can be one of the following:
To create a changelog entry, make a new file in the `changelog.d` directory named
in the format of `PRnumber.type`. The type can be one of the following:

* ``feature``.
* ``bugfix``.
* ``docker`` (for updates to the Docker image).
* ``doc`` (for updates to the documentation).
* ``removal`` (also used for deprecations).
* ``misc`` (for internal-only changes).
* `feature`
* `bugfix`
* `docker` (for updates to the Docker image)
* `doc` (for updates to the documentation)
* `removal` (also used for deprecations)
* `misc` (for internal-only changes)

The content of the file is your changelog entry, which should be a short
description of your change in the same style as the rest of our `changelog
<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
description of your change in the same style as the rest of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md). The file can
contain Markdown formatting, and should end with a full stop ('.') for
consistency.

@@ -89,20 +108,21 @@ Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when recieved over federation. Contributed by Jane
`changelog.d/1234.bugfix`, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix.".
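As a sketch, such an entry can be created in a single shell command (the PR number and wording are the paragraph's own example):

```
echo "The security levels of Florbs are now validated when received over federation. Contributed by Jane Matrix." > changelog.d/1234.bugfix
```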
Debian changelog
----------------
## Debian changelog

Changes which affect the debian packaging files (in ``debian``) are an
Changes which affect the debian packaging files (in `debian`) are an
exception.

In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::
next release. For this, run the following command:

dch
```
dch
```

This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
@@ -114,74 +134,77 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

Sign off
~~~~~~~~
## Sign off

In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
`submitting patches process <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`_, Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::
the contribution or otherwise have the right to contribute it to Matrix:

Developer Certificate of Origin
Version 1.1
```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1
Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:
By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or

(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or

(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.

(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
include the line in your commit or pull request comment:

Signed-off-by: Your Name <your@email.example.org>
```
Signed-off-by: Your Name <your@email.example.org>
```

We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.

Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.
Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
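Putting those pieces together, a minimal sketch (the identity values and commit message are placeholders):

```
# One-time identity setup used by the sign-off.
git config --global user.name "Your Name"
git config --global user.email "your@email.example.org"

# -s appends the Signed-off-by line automatically.
git commit -s -m "Fix a hypothetical bug"
```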
Conclusion
~~~~~~~~~~
## Conclusion

That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
@@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than
System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5, 3.6, or 3.7
- Python 3.5, 3.6, 3.7 or 3.8.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

Synapse is written in Python but some of the libraries it uses are written in
@@ -109,8 +109,8 @@ Installing prerequisites on Ubuntu or Debian:

```
sudo apt-get install build-essential python3-dev libffi-dev \
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
python3-pip python3-setuptools sqlite3 \
libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev
```

#### ArchLinux
@@ -133,9 +133,9 @@ sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
sudo yum groupinstall "Development Tools"
```

#### Mac OS X
#### macOS

Installing prerequisites on Mac OS X:
Installing prerequisites on macOS:

```
xcode-select --install
@@ -144,6 +144,14 @@ sudo pip install virtualenv
brew install pkg-config libffi
```

On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:

```
brew install openssl@1.1
export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/
```

#### OpenSUSE

Installing prerequisites on openSUSE:
@@ -413,16 +421,18 @@ For a more detailed guide to configuring your server for federation, see

## Email

It is desirable for Synapse to have the capability to send email. For example,
this is required to support the 'password reset' feature.
It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
is added to a user's account, and send email notifications to users when they
receive new messages.

To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.
headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
and `notif_from` fields filled out. You may also need to set `smtp_user`,
`smtp_pass`, and `require_transport_security`.

If Synapse is not configured with an SMTP server, password reset via email will
be disabled by default.
If email is not configured, password reset, registration and notifications via
email will be disabled.
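A sketch of what such a section in `homeserver.yaml` might look like, using only the fields named above (host and credentials are placeholders):

```
email:
  smtp_host: smtp.example.com
  smtp_port: 587
  smtp_user: "synapse"
  smtp_pass: "secret"
  require_transport_security: true
  notif_from: "Your homeserver <noreply@example.com>"
```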
## Registering a user
@@ -75,6 +75,23 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

Upgrading to v1.7.0
===================

In an attempt to configure Synapse in a privacy preserving way, the default
behaviours of ``allow_public_rooms_without_auth`` and
``allow_public_rooms_over_federation`` have been inverted. This means that by
default, only authenticated users querying the Client/Server API will be able
to query the room directory, and relatedly that the server will not share
room directory information with other servers over federation.

If your installation does not explicitly set these settings one way or the other
and you want either setting to be ``true`` then it will be necessary to update
your homeserver configuration file accordingly.

For more details on the surrounding context see our `explainer
<https://matrix.org/blog/2019/11/09/avoiding-unwelcome-visitors-on-private-matrix-servers>`_.
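For instance, to keep the pre-1.7.0 behaviour an installation would set both
options explicitly in its homeserver configuration (a sketch; only do this if
you want the room directory to be publicly readable)::

    allow_public_rooms_without_auth: true
    allow_public_rooms_over_federation: true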

Upgrading to v1.5.0
===================
@@ -0,0 +1 @@
Implement v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)).
@@ -0,0 +1 @@
Prevent redacted events from being returned during message search.
@@ -0,0 +1 @@
Prevent error on trying to search an upgraded room when the server is not in the predecessor room.
@@ -0,0 +1 @@
Add a develop script to generate full SQL schemas.
@@ -0,0 +1 @@
Allow custom SAML username mapping functionality through an external provider plugin.
@@ -0,0 +1 @@
Improve performance of looking up cross-signing keys.
@@ -0,0 +1 @@
Port synapse.handlers.initial_sync to async/await.
@@ -0,0 +1 @@
Refactor get_events_from_store_or_dest to return a dict.
@@ -0,0 +1 @@
Remove redundant code from event authorisation implementation.
@@ -0,0 +1 @@
Move get_state methods into FederationHandler.
@@ -0,0 +1 @@
Port handlers.account_data and handlers.account_validity to async/await.
@@ -0,0 +1 @@
Make `make_deferred_yieldable` work with async/await.
@@ -0,0 +1 @@
Remove `SnapshotCache` in favour of `ResponseCache`.
@@ -0,0 +1 @@
Change phone home stats to not assume there is a single database and report information about the database used by the main data store.
@@ -0,0 +1 @@
Move database config from apps into HomeServer object.
@@ -0,0 +1 @@
Silence mypy errors for files outside those specified.
@@ -0,0 +1 @@
Fix race which occasionally caused deleted devices to reappear.
@@ -0,0 +1 @@
Clean up some logging when handling incoming events over federation.
@@ -0,0 +1 @@
Port some of FederationHandler to async/await.
@@ -0,0 +1 @@
Refactor some code in the event authentication path for clarity.
@@ -0,0 +1 @@
Prevent redacted events from being returned during message search.
@@ -0,0 +1,2 @@
Improve sanity-checking when receiving events over federation.
@@ -0,0 +1 @@
Test more folders against mypy.
@@ -0,0 +1 @@
Adjust the sytest blacklist for worker mode.
@@ -78,7 +78,7 @@ class InputOutput(object):
        m = re.match("^join (\S+)$", line)
        if m:
            # The `sender` wants to join a room.
            room_name, = m.groups()
            (room_name,) = m.groups()
            self.print_line("%s joining %s" % (self.user, room_name))
            self.server.join_room(room_name, self.user, self.user)
            # self.print_line("OK.")
@@ -105,7 +105,7 @@ class InputOutput(object):
        m = re.match("^backfill (\S+)$", line)
        if m:
            # we want to backfill a room
            room_name, = m.groups()
            (room_name,) = m.groups()
            self.print_line("backfill %s" % room_name)
            self.server.backfill(room_name)
            return
@@ -0,0 +1,17 @@
# Setup Synapse with Systemd
This is a setup for managing synapse with a user contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to accommodate your installation, in accordance with the
[installation instructions](../../INSTALL.md).

## Setup
1. Under the service section, ensure the `User` variable matches which user
you installed synapse under and wish to run it as.
2. Under the service section, ensure the `WorkingDirectory` variable matches
where you have installed synapse.
3. Under the service section, ensure the `ExecStart` variable matches the
appropriate locations of your installation.
4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
5. Start Synapse: `sudo systemctl start matrix-synapse`
6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
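Steps 4-7 condense to the following sketch (assuming the unit file has already been edited to match your installation):

```
sudo cp matrix-synapse.service /etc/systemd/system/
sudo systemctl start matrix-synapse
sudo systemctl status matrix-synapse
sudo systemctl enable matrix-synapse   # optional: start at boot
```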
@@ -4,8 +4,11 @@
# systemctl enable matrix-synapse
# systemctl start matrix-synapse
#
# This assumes that Synapse has been installed by a user named
# synapse.
#
# This assumes that Synapse has been installed in a virtualenv in
# /opt/synapse/env.
# the user's home directory: `/home/synapse/synapse/env`.
#
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.
@@ -22,8 +25,8 @@ Restart=on-abort
User=synapse
Group=nogroup

WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
WorkingDirectory=/home/synapse/synapse
ExecStart=/home/synapse/synapse/env/bin/python -m synapse.app.homeserver --config-path=/home/synapse/synapse/homeserver.yaml
SyslogIdentifier=matrix-synapse

# adjust the cache factor if necessary
@@ -1,3 +1,15 @@
|
||||
matrix-synapse-py3 (1.6.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.6.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 28 Nov 2019 11:10:40 +0000
|
||||
|
||||
matrix-synapse-py3 (1.6.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.6.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2019 12:15:40 +0000
|
||||
|
||||
matrix-synapse-py3 (1.5.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.5.1.
|
||||
|
||||
+13 -1
@@ -101,7 +101,7 @@ is suitable for local testing, but for any practical use, you will either need
to use a reverse proxy, or configure Synapse to expose an HTTPS port.

For documentation on using a reverse proxy, see
-https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.

For more information on enabling TLS support in synapse itself, see
https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
@@ -130,3 +130,15 @@ docker run -it --rm \
This will generate the same configuration file as the legacy mode used, but
will store it in `/data/homeserver.yaml` instead of a temporary location. You
can then use it as shown above at [Running synapse](#running-synapse).

## Building the image

If you need to build the image from a Synapse checkout, use the following `docker
build` command from the repo's root:

```
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```

You can choose to build a different docker image by changing the value of the `-f` flag to
point to another Dockerfile.

+6 -5
@@ -169,11 +169,11 @@ def run_generate_config(environ, ownership):
    # log("running %s" % (args, ))

    if ownership is not None:
-        args = ["su-exec", ownership] + args
-        os.execv("/sbin/su-exec", args)
+        # make sure that synapse has perms to write to the data dir.
+        subprocess.check_output(["chown", ownership, data_dir])
+
+        args = ["su-exec", ownership] + args
+        os.execv("/sbin/su-exec", args)
    else:
        os.execv("/usr/local/bin/python", args)

@@ -217,8 +217,9 @@ def main(args, environ):
    # backwards-compatibility generate-a-config-on-the-fly mode
    if "SYNAPSE_CONFIG_PATH" in environ:
        error(
-            "SYNAPSE_SERVER_NAME and SYNAPSE_CONFIG_PATH are mutually exclusive "
-            "except in `generate` or `migrate_config` mode."
+            "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
+            "in `generate` or `migrate_config` mode. To start synapse using a "
+            "config file, unset the SYNAPSE_SERVER_NAME environment variable."
        )

        config_path = "/compiled/homeserver.yaml"

@@ -4,7 +4,7 @@ The captcha mechanism used is Google's ReCaptcha. This requires API keys from Go

## Getting keys

-Requires a public/private key pair from:
+Requires a site/secret key pair from:

<https://developers.google.com/recaptcha/>

@@ -15,8 +15,8 @@ Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option

The keys are a config option on the home server config. If they are not
visible, you can generate them via `--generate-config`. Set the following value:

-    recaptcha_public_key: YOUR_PUBLIC_KEY
-    recaptcha_private_key: YOUR_PRIVATE_KEY
+    recaptcha_public_key: YOUR_SITE_KEY
+    recaptcha_private_key: YOUR_SECRET_KEY

In addition, you MUST enable captchas via:

@@ -21,3 +21,20 @@ It returns a JSON body like the following:
    ]
}
```

# Quarantine media in a room

This API 'quarantines' all the media in a room.

The API is:

```
POST /_synapse/admin/v1/quarantine_media/<room_id>

{}
```

Quarantining media means that it is marked as inaccessible by users. It applies
to any local media, and any locally-cached copies of remote media.

The media file itself (and any thumbnails) is not deleted from the server.
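
As a sketch only (not from the original docs), the call could be made with Python's `requests` library; the homeserver URL and admin access token below are placeholders:

```python
import requests
from urllib.parse import quote

HOMESERVER = "https://localhost:8448"   # placeholder
ACCESS_TOKEN = "YOUR_ADMIN_TOKEN"       # placeholder: a server admin's token
room_id = "!roomid:example.com"         # placeholder room ID

# POST an empty JSON body to the quarantine endpoint shown above.
resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/quarantine_media/{quote(room_id)}",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={},
)
resp.raise_for_status()
```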

@@ -1,3 +1,48 @@
List Accounts
=============

This API returns all local user accounts.

The API is::

    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false

including an ``access_token`` of a server admin.
The parameters ``from`` and ``limit`` are required only for pagination.
By default, a ``limit`` of 100 is used.
The parameter ``user_id`` can be used to select only users with user ids that
contain this value.
The parameter ``guests=false`` can be used to exclude guest users;
the default is to include guest users.
The parameter ``deactivated=true`` can be used to include deactivated users;
the default is to exclude deactivated users.
If the endpoint does not return a ``next_token`` then there are no more users left.
It returns a JSON body like the following:

.. code:: json

    {
        "users": [
            {
                "name": "<user_id1>",
                "password_hash": "<password_hash1>",
                "is_guest": 0,
                "admin": 0,
                "user_type": null,
                "deactivated": 0
            }, {
                "name": "<user_id2>",
                "password_hash": "<password_hash2>",
                "is_guest": 0,
                "admin": 1,
                "user_type": null,
                "deactivated": 0
            }
        ],
        "next_token": "100"
    }
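
For illustration only (not part of the original document), the same request
could be issued with Python's ``requests`` library; the base URL and token
below are placeholders:

.. code:: python

    import requests

    resp = requests.get(
        "https://localhost:8448/_synapse/admin/v2/users",
        headers={"Authorization": "Bearer YOUR_ADMIN_TOKEN"},
        params={"from": 0, "limit": 10, "guests": "false"},
    )
    resp.raise_for_status()
    # Iterate the page of users; request again with from=<next_token> to page.
    for user in resp.json()["users"]:
        print(user["name"])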

Query Account
=============

@@ -0,0 +1,77 @@
# SAML Mapping Providers

A SAML mapping provider is a Python class (loaded via a Python module) that
works out how to map attributes of a SAML response object to Matrix-specific
user attributes. Details such as user ID localpart, displayname, and even avatar
URLs are all things that can be mapped from talking to an SSO service.

As an example, an SSO service may return the email address
"john.smith@example.com" for a user, whereas Synapse will need to figure out how
to turn that into a displayname when creating a Matrix user for this individual.
It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
variations. As each Synapse configuration may want something different, this is
where SAML mapping providers come into play.

## Enabling Providers

External mapping providers are provided to Synapse in the form of an external
Python module. Retrieve this module from [PyPi](https://pypi.org) or elsewhere,
then tell Synapse where to look for the handler class by editing the
`saml2_config.user_mapping_provider.module` config option.

`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
comment these options out and use those specified by the module instead.

## Building a Custom Mapping Provider

A custom mapping provider must specify the following methods
(a minimal sketch putting them together follows this section):

* `__init__(self, parsed_config)`
   - Arguments:
     - `parsed_config` - A configuration object that is the return value of the
       `parse_config` method. You should set any configuration options needed by
       the module here.
* `saml_response_to_user_attributes(self, saml_response, failures)`
   - Arguments:
     - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
       information from.
     - `failures` - An `int` that represents the number of times the returned
       mxid localpart mapping has failed. This should be used to create a
       deduplicated mxid localpart which should be returned instead. For
       example, if this method returns `john.doe` as the value of
       `mxid_localpart` in the returned dict, and that is already taken on
       the homeserver, this method will be called again with the same
       parameters but with failures=1. The method should then return a
       different `mxid_localpart` value, such as `john.doe1`.
   - This method must return a dictionary, which will then be used by Synapse
     to build a new user. The following keys are allowed:
     * `mxid_localpart` - Required. The mxid localpart of the new user.
     * `displayname` - The displayname of the new user. If not provided, will
       default to the value of `mxid_localpart`.
* `parse_config(config)`
   - This method should have the `@staticmethod` decorator.
   - Arguments:
     - `config` - A `dict` representing the parsed content of the
       `saml2_config.user_mapping_provider.config` homeserver config option.
       Runs on homeserver startup. Providers should extract any option values
       they need here.
   - Whatever is returned will be passed back to the user mapping provider module's
     `__init__` method during construction.
* `get_saml_attributes(config)`
   - This method should have the `@staticmethod` decorator.
   - Arguments:
     - `config` - An object resulting from a call to `parse_config`.
   - Returns a tuple of two sets. The first set equates to the saml auth
     response attributes that are required for the module to function, whereas
     the second set consists of those attributes which can be used if available,
     but are not necessary.

## Synapse's Default Provider

Synapse has a built-in SAML mapping provider if a custom provider isn't
specified in the config. It is located at
[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
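
Putting the methods above together, a minimal custom provider might look like
the following sketch (the class name, attribute names, and deduplication
scheme are illustrative assumptions, not prescribed by Synapse):

```python
class MySamlMappingProvider:
    def __init__(self, parsed_config):
        # Stash whatever parse_config() produced.
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        # `config` is the dict from saml2_config.user_mapping_provider.config;
        # extract and validate any options here.
        return config

    @staticmethod
    def get_saml_attributes(config):
        # (required_attributes, optional_attributes)
        return {"uid"}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures):
        # pysaml2's AuthnResponse exposes attributes via `.ava`.
        uid = saml_response.ava["uid"][0]
        # Append `failures` to deduplicate if the first choice was taken.
        localpart = uid + (str(failures) if failures else "")
        return {
            "mxid_localpart": localpart,
            "displayname": saml_response.ava.get("displayName", [None])[0],
        }
```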

+134 -30
@@ -54,15 +54,16 @@ pid_file: DATADIR/homeserver.pid
#
#require_auth_for_profile_requests: true

-# If set to 'false', requires authentication to access the server's public rooms
-# directory through the client API. Defaults to 'true'.
+# If set to 'true', removes the need for authentication to access the server's
+# public rooms directory through the client API, meaning that anyone can
+# query the room directory. Defaults to 'false'.
#
-#allow_public_rooms_without_auth: false
+#allow_public_rooms_without_auth: true

-# If set to 'false', forbids any other homeserver to fetch the server's public
-# rooms directory via federation. Defaults to 'true'.
+# If set to 'true', allows any other homeserver to fetch the server's public
+# rooms directory via federation. Defaults to 'false'.
#
-#allow_public_rooms_over_federation: false
+#allow_public_rooms_over_federation: true

# The default room version for newly created rooms.
#
@@ -72,7 +73,7 @@ pid_file: DATADIR/homeserver.pid
# For example, for room version 1, default_room_version should be set
# to "1".
#
-#default_room_version: "4"
+#default_room_version: "5"

# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@@ -287,7 +288,7 @@ listeners:
# Used by phonehome stats to group together related servers.
#server_context: context

-# Resource-constrained Homeserver Settings
+# Resource-constrained homeserver Settings
#
# If limit_remote_rooms.enabled is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
@@ -328,6 +329,69 @@ listeners:
#
#user_ips_max_age: 14d

# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
  # The message retention policies feature is disabled by default. Uncomment the
  # following line to enable it.
  #
  #enabled: true

  # Default retention policy. If set, Synapse will apply it to rooms that lack the
  # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
  # matter much because Synapse doesn't take it into account yet.
  #
  #default_policy:
  #  min_lifetime: 1d
  #  max_lifetime: 1y

  # Retention policy limits. If set, a user won't be able to send a
  # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
  # that's not within this range. This is especially useful in closed federations,
  # in which server admins can make sure every federating server applies the same
  # rules.
  #
  #allowed_lifetime_min: 1d
  #allowed_lifetime_max: 1y

  # Server admins can define the settings of the background jobs purging the
  # events whose lifetime has expired under the 'purge_jobs' section.
  #
  # If no configuration is provided, a single job will be set up to delete expired
  # events in every room daily.
  #
  # Each job's configuration defines which range of message lifetimes the job
  # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
  # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
  # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
  # lower than or equal to 3 days. Both the minimum and the maximum value of a
  # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
  # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
  # whose 'max_lifetime' is lower than or equal to three days.
  #
  # The rationale for this per-job configuration is that some rooms might have a
  # retention policy with a low 'max_lifetime', where history needs to be purged
  # of outdated messages on a very frequent basis (e.g. every 5min), but not want
  # that purge to be performed by a job that's iterating over every room it knows,
  # which would be quite heavy on the server.
  #
  #purge_jobs:
  #  - shortest_max_lifetime: 1d
  #    longest_max_lifetime: 3d
  #    interval: 5m
  #  - shortest_max_lifetime: 3d
  #    longest_max_lifetime: 1y
  #    interval: 24h
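
  # For illustration only (example values assumed, not part of the generated
  # sample config): a room admin opting into this feature might send an
  # 'm.room.retention' state event whose content is
  #
  #   {"min_lifetime": 86400000, "max_lifetime": 604800000}
  #
  # i.e. one day and one week respectively, expressed in milliseconds.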


## TLS ##

@@ -743,11 +807,11 @@ uploads_path: "DATADIR/uploads"
## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.

-# This Home Server's ReCAPTCHA public key.
+# This homeserver's ReCAPTCHA public key.
#
#recaptcha_public_key: "YOUR_PUBLIC_KEY"

-# This Home Server's ReCAPTCHA private key.
+# This homeserver's ReCAPTCHA private key.
#
#recaptcha_private_key: "YOUR_PRIVATE_KEY"

@@ -955,7 +1019,7 @@ uploads_path: "DATADIR/uploads"
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
account_threepid_delegates:
-    #email: https://example.com     # Delegate email sending to example.org
+    #email: https://example.com     # Delegate email sending to example.com
    #msisdn: http://localhost:8090  # Delegate SMS sending to this local process

# Users who register on this homeserver will automatically be joined

@@ -1186,33 +1250,58 @@ saml2_config:
  #
  #config_path: "CONFDIR/sp_conf.py"

-  # the lifetime of a SAML session. This defines how long a user has to
+  # The lifetime of a SAML session. This defines how long a user has to
  # complete the authentication process, if allow_unsolicited is unset.
  # The default is 5 minutes.
  #
  #saml_session_lifetime: 5m

-  # The SAML attribute (after mapping via the attribute maps) to use to derive
-  # the Matrix ID from. 'uid' by default.
+  # An external module can be provided here as a custom solution to
+  # mapping attributes returned from a saml provider onto a matrix user.
  #
-  #mxid_source_attribute: displayName
+  user_mapping_provider:
+    # The custom module's class. Uncomment to use a custom module.
+    #
+    #module: mapping_provider.SamlMappingProvider

-  # The mapping system to use for mapping the saml attribute onto a matrix ID.
-  # Options include:
-  # * 'hexencode' (which maps unpermitted characters to '=xx')
-  # * 'dotreplace' (which replaces unpermitted characters with '.').
-  # The default is 'hexencode'.
-  #
-  #mxid_mapping: dotreplace
+    # Custom configuration values for the module. Below options are
+    # intended for the built-in provider, they should be changed if
+    # using a custom module. This section will be passed as a Python
+    # dictionary to the module's `parse_config` method.
+    #
+    config:
+      # The SAML attribute (after mapping via the attribute maps) to use
+      # to derive the Matrix ID from. 'uid' by default.
+      #
+      # Note: This used to be configured by the
+      # saml2_config.mxid_source_attribute option. If that is still
+      # defined, its value will be used instead.
+      #
+      #mxid_source_attribute: displayName

-  # In previous versions of synapse, the mapping from SAML attribute to MXID was
-  # always calculated dynamically rather than stored in a table. For backwards-
-  # compatibility, we will look for user_ids matching such a pattern before
-  # creating a new account.
+      # The mapping system to use for mapping the saml attribute onto a
+      # matrix ID.
+      #
+      # Options include:
+      # * 'hexencode' (which maps unpermitted characters to '=xx')
+      # * 'dotreplace' (which replaces unpermitted characters with
+      #   '.').
+      # The default is 'hexencode'.
+      #
+      # Note: This used to be configured by the
+      # saml2_config.mxid_mapping option. If that is still defined, its
+      # value will be used instead.
+      #
+      #mxid_mapping: dotreplace

+      # In previous versions of synapse, the mapping from SAML attribute to
+      # MXID was always calculated dynamically rather than stored in a
+      # table. For backwards-compatibility, we will look for user_ids
+      # matching such a pattern before creating a new account.
      #
      # This setting controls the SAML attribute which will be used for this
-  # backwards-compatibility lookup. Typically it should be 'uid', but if the
-  # attribute maps are changed, it may be necessary to change it.
+      # backwards-compatibility lookup. Typically it should be 'uid', but if
+      # the attribute maps are changed, it may be necessary to change it.
      #
      # The default is 'uid'.
      #

@@ -1270,8 +1359,23 @@ password_config:
#   smtp_user: "exampleusername"
#   smtp_pass: "examplepassword"
#   require_transport_security: false
-#   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
-#   app_name: Matrix
#
+#   # notif_from defines the "From" address to use when sending emails.
+#   # It must be set if email sending is enabled.
+#   #
+#   # The placeholder '%(app)s' will be replaced by the application name,
+#   # which is normally 'app_name' (below), but may be overridden by the
+#   # Matrix client application.
+#   #
+#   # Note that the placeholder must be written '%(app)s', including the
+#   # trailing 's'.
+#   #
+#   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
#
+#   # app_name defines the default value for '%(app)s' in notif_from. It
+#   # defaults to 'Matrix'.
+#   #
+#   #app_name: my_branded_matrix_server
#
+#   # Enable email notifications by default
+#   #

+14 -1
@@ -199,7 +199,20 @@ client (C):

#### REPLICATE (C)

-Asks the server to replicate a given stream
+Asks the server to replicate a given stream. The syntax is:

```
REPLICATE <stream_name> <token>
```

Where `<token>` may be either:
 * a numeric stream_id to stream updates since (exclusive)
 * `NOW` to stream all subsequent updates.

The `<stream_name>` is the name of a replication stream to subscribe
to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
of streams). It can also be `ALL` to subscribe to all known streams,
in which case the `<token>` must be set to `NOW`.
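
For example (an added illustration, assuming a stream named `events` exists on the server), a client could subscribe to all subsequent updates on that stream with:

```
REPLICATE events NOW
```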

#### USER_SYNC (C)

@@ -7,7 +7,6 @@ who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
-solution to fix it is to execute the SQL here
-https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql
+solution to fix it is to execute the SQL [here](../synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.

@@ -1,8 +1,11 @@
[mypy]
-namespace_packages=True
-plugins=mypy_zope:plugin
-follow_imports=skip
-mypy_path=stubs
+namespace_packages = True
+plugins = mypy_zope:plugin
+follow_imports = silent
+check_untyped_defs = True
+show_error_codes = True
+show_traceback = True
+mypy_path = stubs

[mypy-zope]
ignore_missing_imports = True

@@ -27,7 +27,7 @@ class Store(object):
        "_store_pdu_reference_hash_txn"
    ]
    _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
-    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
+    simple_insert_txn = SQLBaseStore.__dict__["simple_insert_txn"]


store = Store()

+11 -3
@@ -7,7 +7,15 @@

set -e

-isort -y -rc synapse tests scripts-dev scripts
-flake8 synapse tests
-python3 -m black synapse tests scripts-dev scripts
+if [ $# -ge 1 ]
+then
+    files=$*
+else
+    files="synapse tests scripts-dev scripts"
+fi
+
+echo "Linting these locations: $files"
+isort -y -rc $files
+flake8 $files
+python3 -m black $files
./scripts-dev/config-lint.sh
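
For example, running `./scripts-dev/lint.sh synapse/handlers` would lint only that subtree, while invoking the script with no arguments keeps the previous behaviour of linting the default locations.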

Executable +184
@@ -0,0 +1,184 @@
#!/bin/bash
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.
#
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.

POSTGRES_HOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"

SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"
POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres"

REQUIRED_DEPS=("matrix-synapse" "psycopg2")

usage() {
  echo
  echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
  echo
  echo "-p <postgres_username>"
  echo "  Username to connect to local postgres instance. The password will be requested"
  echo "  during script execution."
  echo "-c"
  echo "  CI mode. Enables coverage tracking and prints every command that the script runs."
  echo "-o <path>"
  echo "  Directory to output full schema files to."
  echo "-h"
  echo "  Display this help text."
}

while getopts "p:co:h" opt; do
  case $opt in
    p)
      POSTGRES_USERNAME=$OPTARG
      ;;
    c)
      # Print all commands that are being executed
      set -x

      # Modify required dependencies for coverage
      REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess")

      COVERAGE=1
      ;;
    o)
      command -v realpath > /dev/null || (echo "The -o flag requires the 'realpath' binary to be installed" && exit 1)
      OUTPUT_DIR="$(realpath "$OPTARG")"
      ;;
    h)
      usage
      exit
      ;;
    \?)
      echo "ERROR: Invalid option: -$OPTARG" >&2
      usage
      exit
      ;;
  esac
done

# Check that required dependencies are installed
unsatisfied_requirements=()
for dep in "${REQUIRED_DEPS[@]}"; do
  pip show "$dep" --quiet || unsatisfied_requirements+=("$dep")
done
if [ ${#unsatisfied_requirements} -ne 0 ]; then
  echo "Please install the following python packages: ${unsatisfied_requirements[*]}"
  exit 1
fi

if [ -z "$POSTGRES_USERNAME" ]; then
  echo "No postgres username supplied"
  usage
  exit 1
fi

if [ -z "$OUTPUT_DIR" ]; then
  echo "No output directory supplied"
  usage
  exit 1
fi

# Create the output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"

read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD
echo ""

# Exit immediately if a command fails
set -e

# cd to root of the synapse directory
cd "$(dirname "$0")/.."

# Create temporary SQLite and Postgres homeserver db configs and key file
TMPDIR=$(mktemp -d)
KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
SQLITE_CONFIG=$TMPDIR/sqlite.conf
SQLITE_DB=$TMPDIR/homeserver.db
POSTGRES_CONFIG=$TMPDIR/postgres.conf

# Ensure these files are deleted on script exit
trap 'rm -rf $TMPDIR' EXIT

cat > "$SQLITE_CONFIG" <<EOF
server_name: "test"

signing_key_path: "$KEY_FILE"
macaroon_secret_key: "abcde"

report_stats: false

database:
  name: "sqlite3"
  args:
    database: "$SQLITE_DB"

# Suppress the key server warning.
trusted_key_servers: []
EOF

cat > "$POSTGRES_CONFIG" <<EOF
server_name: "test"

signing_key_path: "$KEY_FILE"
macaroon_secret_key: "abcde"

report_stats: false

database:
  name: "psycopg2"
  args:
    user: "$POSTGRES_USERNAME"
    host: "$POSTGRES_HOST"
    password: "$POSTGRES_PASSWORD"
    database: "$POSTGRES_DB_NAME"

# Suppress the key server warning.
trusted_key_servers: []
EOF

# Generate the server's signing key.
echo "Generating SQLite3 db schema..."
python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
scripts-dev/update_database --database-config "$SQLITE_CONFIG"

# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb $POSTGRES_DB_NAME

echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
if [ -z "$COVERAGE" ]; then
  # No coverage needed
  scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
else
  # Coverage desired
  coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
fi

# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
# This needs to be done after synapse_port_db is run
echo "Dropping unwanted db tables..."
SQL="
DROP TABLE schema_version;
DROP TABLE applied_schema_deltas;
DROP TABLE applied_module_schemas;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"

echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"

echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..."
pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE"

echo "Cleaning up temporary Postgres database..."
dropdb $POSTGRES_DB_NAME

echo "Done! Files dumped to: $OUTPUT_DIR"

Executable +100
@@ -0,0 +1,100 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
import sys

import yaml

from twisted.internet import defer, reactor

from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.prepare_database import prepare_database

logger = logging.getLogger("update_database")


class MockHomeserver(HomeServer):
    DATASTORE_CLASS = DataStore

    def __init__(self, config, **kwargs):
        super(MockHomeserver, self).__init__(
            config.server_name, reactor=reactor, config=config, **kwargs
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Updates a synapse database to the latest schema and runs background updates"
            " on it."
        )
    )
    parser.add_argument("-v", action="store_true")
    parser.add_argument(
        "--database-config",
        type=argparse.FileType("r"),
        required=True,
        help="A database config file for either a SQLite3 database or a PostgreSQL one.",
    )

    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
    }

    logging.basicConfig(**logging_config)

    # Load, process and sanity-check the config.
    hs_config = yaml.safe_load(args.database_config)

    if "database" not in hs_config:
        sys.stderr.write("The configuration file must have a 'database' section.\n")
        sys.exit(4)

    config = HomeServerConfig()
    config.parse_config_dict(hs_config, "", "")

    # Instantiate and initialise the homeserver object.
    hs = MockHomeserver(config)

    db_conn = hs.get_db_conn()
    # Update the database to the latest schema.
    prepare_database(db_conn, hs.database_engine, config=config)
    db_conn.commit()

    # setup instantiates the store within the homeserver object.
    hs.setup()
    store = hs.get_datastore()

    @defer.inlineCallbacks
    def run_background_updates():
        yield store.db.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()

    # Apply all background updates on the database.
    reactor.callWhenRunning(
        lambda: run_as_background_process("background_updates", run_background_updates)
    )

    reactor.run()
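
For example (config path assumed; the flag names come from the argument parser above), the script could be run as `python scripts-dev/update_database --database-config homeserver.yaml -v`.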

@@ -72,7 +72,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
    # check that the original exists
    original_file = src_paths.remote_media_filepath(origin_server, file_id)
    if not os.path.exists(original_file):
-        logger.warn(
+        logger.warning(
            "Original for %s/%s (%s) does not exist",
            origin_server,
            file_id,

+54 -75
@@ -47,6 +47,7 @@ from synapse.storage.data_stores.main.media_repository import (
from synapse.storage.data_stores.main.registration import (
    RegistrationBackgroundUpdateStore,
)
+from synapse.storage.data_stores.main.room import RoomBackgroundUpdateStore
from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
from synapse.storage.data_stores.main.state import StateBackgroundUpdateStore
@@ -54,6 +55,7 @@ from synapse.storage.data_stores.main.stats import StatsStore
from synapse.storage.data_stores.main.user_directory import (
    UserDirectoryBackgroundUpdateStore,
)
+from synapse.storage.database import Database
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
from synapse.util import Clock
@@ -131,54 +133,22 @@ class Store(
    EventsBackgroundUpdatesStore,
    MediaRepositoryBackgroundUpdateStore,
    RegistrationBackgroundUpdateStore,
+    RoomBackgroundUpdateStore,
    RoomMemberBackgroundUpdateStore,
    SearchBackgroundUpdateStore,
    StateBackgroundUpdateStore,
    UserDirectoryBackgroundUpdateStore,
    StatsStore,
):
-    def __init__(self, db_conn, hs):
-        super().__init__(db_conn, hs)
-        self.db_pool = hs.get_db_pool()
-
-    @defer.inlineCallbacks
-    def runInteraction(self, desc, func, *args, **kwargs):
-        def r(conn):
-            try:
-                i = 0
-                N = 5
-                while True:
-                    try:
-                        txn = conn.cursor()
-                        return func(
-                            LoggingTransaction(txn, desc, self.database_engine, [], []),
-                            *args,
-                            **kwargs
-                        )
-                    except self.database_engine.module.DatabaseError as e:
-                        if self.database_engine.is_deadlock(e):
-                            logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
-                            if i < N:
-                                i += 1
-                                conn.rollback()
-                                continue
-                        raise
-            except Exception as e:
-                logger.debug("[TXN FAIL] {%s} %s", desc, e)
-                raise
-
-        with PreserveLoggingContext():
-            return (yield self.db_pool.runWithConnection(r))
-
    def execute(self, f, *args, **kwargs):
-        return self.runInteraction(f.__name__, f, *args, **kwargs)
+        return self.db.runInteraction(f.__name__, f, *args, **kwargs)

    def execute_sql(self, sql, *args):
        def r(txn):
            txn.execute(sql, args)
            return txn.fetchall()

-        return self.runInteraction("execute_sql", r)
+        return self.db.runInteraction("execute_sql", r)

    def insert_many_txn(self, txn, table, headers, rows):
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
@@ -221,7 +191,7 @@ class Porter(object):
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
-            row = yield self.postgres_store._simple_select_one(
+            row = yield self.postgres_store.db.simple_select_one(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcols=("forward_rowid", "backward_rowid"),
@@ -231,12 +201,14 @@ class Porter(object):
            total_to_port = None
            if row is None:
                if table == "sent_transactions":
-                    forward_chunk, already_ported, total_to_port = (
-                        yield self._setup_sent_transactions()
-                    )
+                    (
+                        forward_chunk,
+                        already_ported,
+                        total_to_port,
+                    ) = yield self._setup_sent_transactions()
                    backward_chunk = 0
                else:
-                    yield self.postgres_store._simple_insert(
+                    yield self.postgres_store.db.simple_insert(
                        table="port_from_sqlite3",
                        values={
                            "table_name": table,
@@ -266,7 +238,7 @@ class Porter(object):

        yield self.postgres_store.execute(delete_all)

-        yield self.postgres_store._simple_insert(
+        yield self.postgres_store.db.simple_insert(
            table="port_from_sqlite3",
            values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
        )
@@ -320,7 +292,7 @@ class Porter(object):
        if table == "user_directory_stream_pos":
            # We need to make sure there is a single row, `(X, null), as that is
            # what synapse expects to be there.
-            yield self.postgres_store._simple_insert(
+            yield self.postgres_store.db.simple_insert(
                table=table, values={"stream_id": None}
            )
            self.progress.update(table, table_size)  # Mark table as done
@@ -361,7 +333,9 @@ class Porter(object):

            return headers, forward_rows, backward_rows

-        headers, frows, brows = yield self.sqlite_store.runInteraction("select", r)
+        headers, frows, brows = yield self.sqlite_store.db.runInteraction(
+            "select", r
+        )

        if frows or brows:
            if frows:
@@ -375,7 +349,7 @@ class Porter(object):
            def insert(txn):
                self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)

-                self.postgres_store._simple_update_one_txn(
+                self.postgres_store.db.simple_update_one_txn(
                    txn,
                    table="port_from_sqlite3",
                    keyvalues={"table_name": table},
@@ -414,7 +388,7 @@ class Porter(object):

            return headers, rows

-        headers, rows = yield self.sqlite_store.runInteraction("select", r)
+        headers, rows = yield self.sqlite_store.db.runInteraction("select", r)

        if rows:
            forward_chunk = rows[-1][0] + 1
@@ -431,8 +405,8 @@ class Porter(object):
            rows_dict = []
            for row in rows:
                d = dict(zip(headers, row))
-                if "\0" in d['value']:
-                    logger.warn('dropping search row %s', d)
+                if "\0" in d["value"]:
+                    logger.warning("dropping search row %s", d)
                else:
                    rows_dict.append(d)
@@ -452,7 +426,7 @@ class Porter(object):
            ],
        )

-        self.postgres_store._simple_update_one_txn(
+        self.postgres_store.db.simple_update_one_txn(
            txn,
            table="port_from_sqlite3",
            keyvalues={"table_name": "event_search"},
@@ -502,17 +476,14 @@ class Porter(object):
        self.progress.set_state("Preparing %s" % config["name"])
        conn = self.setup_db(config, engine)

-        db_pool = adbapi.ConnectionPool(
-            config["name"], **config["args"]
-        )
+        db_pool = adbapi.ConnectionPool(config["name"], **config["args"])

        hs = MockHomeserver(self.hs_config, engine, conn, db_pool)

-        store = Store(conn, hs)
+        store = Store(Database(hs), conn, hs)

-        yield store.runInteraction(
-            "%s_engine.check_database" % config["name"],
-            engine.check_database,
-        )
+        yield store.db.runInteraction(
+            "%s_engine.check_database" % config["name"], engine.check_database,
+        )

        return store
@@ -520,7 +491,9 @@ class Porter(object):
    @defer.inlineCallbacks
    def run_background_updates_on_postgres(self):
        # Manually apply all background updates on the PostgreSQL database.
-        postgres_ready = yield self.postgres_store.has_completed_background_updates()
+        postgres_ready = (
+            yield self.postgres_store.db.updates.has_completed_background_updates()
+        )

        if not postgres_ready:
            # Only say that we're running background updates when there are background
@@ -528,9 +501,9 @@ class Porter(object):
            self.progress.set_state("Running background updates on PostgreSQL")

        while not postgres_ready:
-            yield self.postgres_store.do_next_background_update(100)
+            yield self.postgres_store.db.updates.do_next_background_update(100)
            postgres_ready = yield (
-                self.postgres_store.has_completed_background_updates()
+                self.postgres_store.db.updates.has_completed_background_updates()
            )

    @defer.inlineCallbacks
@@ -539,7 +512,9 @@ class Porter(object):
        self.sqlite_store = yield self.build_db_store(self.sqlite_config)

        # Check if all background updates are done, abort if not.
-        updates_complete = yield self.sqlite_store.has_completed_background_updates()
+        updates_complete = (
+            yield self.sqlite_store.db.updates.has_completed_background_updates()
+        )
        if not updates_complete:
            sys.stderr.write(
                "Pending background updates exist in the SQLite3 database."
@@ -580,22 +555,22 @@ class Porter(object):
        )

        try:
-            yield self.postgres_store.runInteraction("alter_table", alter_table)
+            yield self.postgres_store.db.runInteraction("alter_table", alter_table)
        except Exception:
            # On Error Resume Next
            pass

-        yield self.postgres_store.runInteraction(
+        yield self.postgres_store.db.runInteraction(
            "create_port_table", create_port_table
        )

        # Step 2. Get tables.
        self.progress.set_state("Fetching tables")
-        sqlite_tables = yield self.sqlite_store._simple_select_onecol(
+        sqlite_tables = yield self.sqlite_store.db.simple_select_onecol(
            table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
        )

-        postgres_tables = yield self.postgres_store._simple_select_onecol(
+        postgres_tables = yield self.postgres_store.db.simple_select_onecol(
            table="information_schema.tables",
            keyvalues={},
            retcol="distinct table_name",
@@ -647,7 +622,7 @@ class Porter(object):
        if isinstance(col, bytes):
            return bytearray(col)
        elif isinstance(col, string_types) and "\0" in col:
-            logger.warn(
+            logger.warning(
                "DROPPING ROW: NUL value in table %s col %s: %r",
                table,
                headers[j],
@@ -685,11 +660,11 @@ class Porter(object):
            rows = txn.fetchall()
            headers = [column[0] for column in txn.description]

-            ts_ind = headers.index('ts')
+            ts_ind = headers.index("ts")

            return headers, [r for r in rows if r[ts_ind] < yesterday]

-        headers, rows = yield self.sqlite_store.runInteraction("select", r)
+        headers, rows = yield self.sqlite_store.db.runInteraction("select", r)

        rows = self._convert_rows("sent_transactions", headers, rows)
@@ -722,7 +697,7 @@ class Porter(object):
        next_chunk = yield self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

-        yield self.postgres_store._simple_insert(
+        yield self.postgres_store.db.simple_insert(
            table="port_from_sqlite3",
            values={
                "table_name": "sent_transactions",
@@ -735,7 +710,7 @@ class Porter(object):
            txn.execute(
                "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
            )
-            size, = txn.fetchone()
+            (size,) = txn.fetchone()
            return int(size)

        remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
@@ -782,10 +757,13 @@ class Porter(object):
    def _setup_state_group_id_seq(self):
        def r(txn):
            txn.execute("SELECT MAX(id) FROM state_groups")
-            next_id = txn.fetchone()[0] + 1
+            curr_id = txn.fetchone()[0]
+            if not curr_id:
+                return
+            next_id = curr_id + 1
            txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))

-        return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
+        return self.postgres_store.db.runInteraction("setup_state_group_id_seq", r)


##############################################
@@ -866,7 +844,7 @@ class CursesProgress(Progress):
        duration = int(now) - int(self.start_time)

        minutes, seconds = divmod(duration, 60)
-        duration_str = '%02dm %02ds' % (minutes, seconds)
+        duration_str = "%02dm %02ds" % (minutes, seconds)

        if self.finished:
            status = "Time spent: %s (Done!)" % (duration_str,)
@@ -876,7 +854,7 @@ class CursesProgress(Progress):
            left = float(self.total_remaining) / self.total_processed

            est_remaining = (int(now) - self.start_time) * left
-            est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
+            est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60)
        else:
            est_remaining_str = "Unknown"
        status = "Time spent: %s (est. remaining: %s)" % (
@@ -962,7 +940,7 @@ if __name__ == "__main__":
        description="A script to port an existing synapse SQLite database to"
        " a new PostgreSQL database."
    )
-    parser.add_argument("-v", action='store_true')
+    parser.add_argument("-v", action="store_true")
    parser.add_argument(
        "--sqlite-database",
        required=True,
@@ -971,12 +949,12 @@ if __name__ == "__main__":
    )
    parser.add_argument(
        "--postgres-config",
-        type=argparse.FileType('r'),
+        type=argparse.FileType("r"),
        required=True,
        help="The database config file for the PostgreSQL database",
    )
    parser.add_argument(
-        "--curses", action='store_true', help="display a curses based progress UI"
+        "--curses", action="store_true", help="display a curses based progress UI"
    )

    parser.add_argument(
@@ -1052,3 +1030,4 @@ if __name__ == "__main__":
    if end_error_exec_info:
        exc_type, exc_value, exc_traceback = end_error_exec_info
        traceback.print_exception(exc_type, exc_value, exc_traceback)
+    sys.exit(5)

@@ -20,3 +20,23 @@ parts:
    source: .
    plugin: python
    python-version: python3
    python-packages:
      - '.[all]'
    build-packages:
      - libffi-dev
      - libturbojpeg0-dev
      - libssl-dev
      - libxslt1-dev
      - libpq-dev
      - zlib1g-dev
    stage-packages:
      - libasn1-8-heimdal
      - libgssapi3-heimdal
      - libhcrypto4-heimdal
      - libheimbase1-heimdal
      - libheimntlm0-heimdal
      - libhx509-5-heimdal
      - libkrb5-26-heimdal
      - libldap-2.4-2
      - libpq5
      - libsasl2-2

+2 -2
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-""" This is a reference implementation of a Matrix home server.
+""" This is a reference implementation of a Matrix homeserver.
"""

import os
@@ -36,7 +36,7 @@ try:
except ImportError:
    pass

-__version__ = "1.5.1"
+__version__ = "1.7.0rc2"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -144,8 +144,8 @@ def main():
    logging.captureWarnings(True)

    parser = argparse.ArgumentParser(
-        description="Used to register new users with a given home server when"
-        " registration has been disabled. The home server must be"
+        description="Used to register new users with a given homeserver when"
+        " registration has been disabled. The homeserver must be"
        " configured with the 'registration_shared_secret' option"
        " set."
    )
@@ -202,7 +202,7 @@ def main():
        "server_url",
        default="https://localhost:8448",
        nargs="?",
-        help="URL to use to talk to the home server. Defaults to "
+        help="URL to use to talk to the homeserver. Defaults to "
        " 'https://localhost:8448'.",
    )

+1 -1
@@ -497,7 +497,7 @@ class Auth(object):
        token = self.get_access_token_from_request(request)
        service = self.store.get_app_service_by_token(token)
        if not service:
-            logger.warn("Unrecognised appservice access token.")
+            logger.warning("Unrecognised appservice access token.")
            raise InvalidClientTokenError()
        request.authenticated_entity = service.sender
        return defer.succeed(service)

@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018-2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -94,6 +95,8 @@ class EventTypes(object):
    ServerACL = "m.room.server_acl"
    Pinned = "m.room.pinned_events"

    Retention = "m.room.retention"


class RejectedReason(object):
    AUTH_ERROR = "auth_error"
@@ -138,3 +141,14 @@ class LimitBlockingTypes(object):

    MONTHLY_ACTIVE_USER = "monthly_active_user"
    HS_DISABLED = "hs_disabled"


class EventContentFields(object):
    """Fields found in events' content, regardless of type."""

    # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
    LABELS = "org.matrix.labels"

    # Timestamp to delete the event after
    # cf https://github.com/matrix-org/matrix-doc/pull/2228
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"
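
For instance (an added illustration; the label value is invented, not taken from the MSCs linked above), a labelled message event's content might look like:

```python
content = {
    "msgtype": "m.text",
    "body": "who's coming to the offsite?",
    # EventContentFields.LABELS
    "org.matrix.labels": ["#work"],
}
```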

@@ -457,7 +457,7 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs):


class FederationError(RuntimeError):
-    """ This class is used to inform remote home servers about erroneous
+    """ This class is used to inform remote homeservers about erroneous
    PDUs they sent us.

    FATAL: The remote server could not interpret the source event.

@@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 Vector Creations Ltd
+# Copyright 2018-2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +23,7 @@ from jsonschema import FormatChecker

from twisted.internet import defer

+from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.types import RoomID, UserID
@@ -66,6 +70,10 @@ ROOM_EVENT_FILTER_SCHEMA = {
        "contains_url": {"type": "boolean"},
        "lazy_load_members": {"type": "boolean"},
        "include_redundant_members": {"type": "boolean"},
+        # Include or exclude events with the provided labels.
+        # cf https://github.com/matrix-org/matrix-doc/pull/2326
+        "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
+        "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
    },
}
@@ -259,6 +267,9 @@ class Filter(object):

        self.contains_url = self.filter_json.get("contains_url", None)

+        self.labels = self.filter_json.get("org.matrix.labels", None)
+        self.not_labels = self.filter_json.get("org.matrix.not_labels", [])
+
    def filters_all_types(self):
        return "*" in self.not_types
@@ -282,6 +293,7 @@ class Filter(object):
            room_id = None
            ev_type = "m.presence"
            contains_url = False
+            labels = []
        else:
            sender = event.get("sender", None)
            if not sender:
@@ -300,10 +312,11 @@ class Filter(object):
            content = event.get("content", {})
            # check if there is a string url field in the content for filtering purposes
            contains_url = isinstance(content.get("url"), text_type)
+            labels = content.get(EventContentFields.LABELS, [])

-        return self.check_fields(room_id, sender, ev_type, contains_url)
+        return self.check_fields(room_id, sender, ev_type, labels, contains_url)

-    def check_fields(self, room_id, sender, event_type, contains_url):
+    def check_fields(self, room_id, sender, event_type, labels, contains_url):
        """Checks whether the filter matches the given event fields.

        Returns:
@@ -313,6 +326,7 @@ class Filter(object):
            "rooms": lambda v: room_id == v,
            "senders": lambda v: sender == v,
            "types": lambda v: _matches_wildcard(event_type, v),
+            "labels": lambda v: v in labels,
        }

        for name, match_func in literal_keys.items():
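
As a sketch of how a client might use this (filter shape per the schema above; the label values are invented), a room event filter matching only events labelled `#work` while excluding `#social` could look like:

```python
room_event_filter = {
    "org.matrix.labels": ["#work"],
    "org.matrix.not_labels": ["#social"],
}
```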

@@ -44,6 +44,8 @@ def check_bind_error(e, address, bind_addresses):
        bind_addresses (list): Addresses on which the service listens.
    """
    if address == "0.0.0.0" and "::" in bind_addresses:
-        logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]")
+        logger.warning(
+            "Failed to listen on 0.0.0.0, continuing because listening on [::]"
+        )
    else:
        raise e

@@ -269,7 +269,7 @@ def start(hs, listeners=None):

    # It is now safe to start your Synapse.
    hs.start_listening(listeners)
-    hs.get_datastore().start_profiling()
+    hs.get_datastore().db.start_profiling()

    setup_sentry(hs)
    setup_sdnotify(hs)

@@ -45,7 +45,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
@@ -229,14 +228,10 @@ def start(config_options):
|
||||
|
||||
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
ss = AdminCmdServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
setup_logging(ss, config, use_worker_options=True)
|
||||
|
||||
@@ -34,7 +34,6 @@ from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -94,7 +93,7 @@ class AppserviceServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -103,7 +102,7 @@ class AppserviceServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -143,8 +142,6 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     if config.notify_appservices:
         sys.stderr.write(
             "\nThe appservices must be disabled in the main synapse process"
@@ -159,10 +156,8 @@ def start(config_options):

     ps = AppserviceServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ps, config, use_worker_options=True)
@@ -62,7 +62,6 @@ from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.rest.client.versions import VersionsRestServlet
 from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -153,7 +152,7 @@ class ClientReaderServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -162,7 +161,7 @@ class ClientReaderServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -181,14 +180,10 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     ss = ClientReaderServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
@@ -57,7 +57,6 @@ from synapse.rest.client.v1.room import (
 )
 from synapse.server import HomeServer
 from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -147,7 +146,7 @@ class EventCreatorServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -156,7 +155,7 @@ class EventCreatorServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -180,14 +179,10 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     ss = EventCreatorServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
@@ -46,7 +46,6 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -132,7 +131,7 @@ class FederationReaderServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -141,7 +140,7 @@ class FederationReaderServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -162,14 +161,10 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     ss = FederationReaderServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
@@ -40,7 +40,7 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.replication.tcp.streams._base import ReceiptsStream
 from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
+from synapse.storage.database import Database
 from synapse.types import ReadReceipt
 from synapse.util.async_helpers import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
@@ -59,8 +59,8 @@ class FederationSenderSlaveStore(
     SlavedDeviceStore,
     SlavedPresenceStore,
 ):
-    def __init__(self, db_conn, hs):
-        super(FederationSenderSlaveStore, self).__init__(db_conn, hs)
+    def __init__(self, database: Database, db_conn, hs):
+        super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs)

         # We pull out the current federation stream position now so that we
         # always have a known value for the federation position in memory so
@@ -69,7 +69,7 @@ class FederationSenderSlaveStore(
         self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

     def _get_federation_out_pos(self, db_conn):
-        sql = "SELECT stream_id FROM federation_stream_position" " WHERE type = ?"
+        sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?"
         sql = self.database_engine.convert_param_style(sql)

         txn = db_conn.cursor()
@@ -123,7 +123,7 @@ class FederationSenderServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -132,7 +132,7 @@ class FederationSenderServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -173,8 +173,6 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     if config.send_federation:
         sys.stderr.write(
             "\nThe send_federation must be disabled in the main synapse process"
@@ -189,10 +187,8 @@ def start(config_options):

     ss = FederationSenderServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
@@ -39,7 +39,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.server import HomeServer
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -204,7 +203,7 @@ class FrontendProxyServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -213,7 +212,7 @@ class FrontendProxyServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -234,14 +233,10 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     ss = FrontendProxyServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
+102 -118
@@ -19,12 +19,13 @@ from __future__ import print_function

 import gc
 import logging
+import math
 import os
+import resource
 import sys

 from six import iteritems

-import psutil
 from prometheus_client import Gauge

 from twisted.application import service
@@ -67,9 +68,9 @@ from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.rest.well_known import WellKnownResource
 from synapse.server import HomeServer
-from synapse.storage import DataStore, are_all_users_on_domain
-from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
-from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
+from synapse.storage import DataStore
+from synapse.storage.engines import IncorrectDatabaseSetup
+from synapse.storage.prepare_database import UpgradeDatabaseException
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
@@ -282,7 +283,7 @@ class SynapseHomeServer(HomeServer):
             reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -291,23 +292,7 @@ class SynapseHomeServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
-
-    def run_startup_checks(self, db_conn, database_engine):
-        all_users_native = are_all_users_on_domain(
-            db_conn.cursor(), database_engine, self.hostname
-        )
-        if not all_users_native:
-            quit_with_error(
-                "Found users in database not native to %s!\n"
-                "You cannot changed a synapse server_name after it's been configured"
-                % (self.hostname,)
-            )
-
-        try:
-            database_engine.check_database(db_conn.cursor())
-        except IncorrectDatabaseSetup as e:
-            quit_with_error(str(e))
+            logger.warning("Unrecognized listener type: %s", listener["type"])


 # Gauges to expose monthly active user control metrics
@@ -343,29 +328,20 @@ def setup(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
-
     hs = SynapseHomeServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     synapse.config.logger.setup_logging(hs, config, use_worker_options=False)

-    logger.info("Preparing database: %s...", config.database_config["name"])
+    logger.info("Setting up server")

     try:
-        with hs.get_db_conn(run_new_connection=False) as db_conn:
-            prepare_database(db_conn, database_engine, config=config)
-            database_engine.on_new_connection(db_conn)
-
-            hs.run_startup_checks(db_conn, database_engine)
-
-        db_conn.commit()
+        hs.setup()
+    except IncorrectDatabaseSetup as e:
+        quit_with_error(str(e))
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
@@ -374,9 +350,6 @@ def setup(config_options):
         )
         sys.exit(1)

-    logger.info("Database prepared in %s.", config.database_config["name"])
-
-    hs.setup()
     hs.setup_master()

     @defer.inlineCallbacks
@@ -435,7 +408,7 @@ def setup(config_options):
             _base.start(hs, config.listeners)

             hs.get_pusherpool().start()
-            hs.get_datastore().start_doing_background_updates()
+            hs.get_datastore().db.updates.start_doing_background_updates()
         except Exception:
             # Print the exception and bail out.
             print("Error during startup:", file=sys.stderr)
@@ -471,6 +444,89 @@ class SynapseService(service.Service):
         return self._port.stopListening()


+# Contains the list of processes we will be monitoring
+# currently either 0 or 1
+_stats_process = []
+
+
+@defer.inlineCallbacks
+def phone_stats_home(hs, stats, stats_process=_stats_process):
+    logger.info("Gathering stats for reporting")
+    now = int(hs.get_clock().time())
+    uptime = int(now - hs.start_time)
+    if uptime < 0:
+        uptime = 0
+
+    stats["homeserver"] = hs.config.server_name
+    stats["server_context"] = hs.config.server_context
+    stats["timestamp"] = now
+    stats["uptime_seconds"] = uptime
+    version = sys.version_info
+    stats["python_version"] = "{}.{}.{}".format(
+        version.major, version.minor, version.micro
+    )
+    stats["total_users"] = yield hs.get_datastore().count_all_users()
+
+    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+    stats["total_nonbridged_users"] = total_nonbridged_users
+
+    daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
+    for name, count in iteritems(daily_user_type_results):
+        stats["daily_user_type_" + name] = count
+
+    room_count = yield hs.get_datastore().get_room_count()
+    stats["total_room_count"] = room_count
+
+    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
+    stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
+    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+    r30_results = yield hs.get_datastore().count_r30_users()
+    for name, count in iteritems(r30_results):
+        stats["r30_users_" + name] = count
+
+    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+    stats["daily_sent_messages"] = daily_sent_messages
+    stats["cache_factor"] = CACHE_SIZE_FACTOR
+    stats["event_cache_size"] = hs.config.event_cache_size
+
+    #
+    # Performance statistics
+    #
+    old = stats_process[0]
+    new = (now, resource.getrusage(resource.RUSAGE_SELF))
+    stats_process[0] = new
+
+    # Get RSS in bytes
+    stats["memory_rss"] = new[1].ru_maxrss
+
+    # Get CPU time in % of a single core, not % of all cores
+    used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - (
+        old[1].ru_utime + old[1].ru_stime
+    )
+    if used_cpu_time == 0 or new[0] == old[0]:
+        stats["cpu_average"] = 0
+    else:
+        stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)
+
+    #
+    # Database version
+    #
+
+    # This only reports info about the *main* database.
+    stats["database_engine"] = hs.get_datastore().db.engine.module.__name__
+    stats["database_server_version"] = hs.get_datastore().db.engine.server_version
+
+    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
+    try:
+        yield hs.get_proxied_http_client().put_json(
+            hs.config.report_stats_endpoint, stats
+        )
+    except Exception as e:
+        logger.warning("Error reporting stats: %s", e)
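For reference, a worked example (with assumed numbers, and the (timestamp, rusage) tuples simplified to scalar CPU seconds) of the cpu_average calculation above, which reports CPU time as a percentage of a single core over the interval between two reports:

    import math

    old = (1000, 2.0)   # (timestamp, utime+stime seconds at the previous report)
    new = (1300, 6.0)   # 300s later, 4s more CPU consumed
    used_cpu_time = new[1] - old[1]
    cpu_average = math.floor(used_cpu_time / (new[0] - old[0]) * 100)
    assert cpu_average == 1  # 4s of CPU over 300s is ~1% of one core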
 def run(hs):
     PROFILE_SYNAPSE = False
     if PROFILE_SYNAPSE:
@@ -497,91 +553,19 @@ def run(hs):
         reactor.run = profile(reactor.run)

     clock = hs.get_clock()
-    start_time = clock.time()

     stats = {}

-    # Contains the list of processes we will be monitoring
-    # currently either 0 or 1
-    stats_process = []
+    def performance_stats_init():
+        _stats_process.clear()
+        _stats_process.append(
+            (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
+        )

     def start_phone_stats_home():
-        return run_as_background_process("phone_stats_home", phone_stats_home)
-
-    @defer.inlineCallbacks
-    def phone_stats_home():
-        logger.info("Gathering stats for reporting")
-        now = int(hs.get_clock().time())
-        uptime = int(now - start_time)
-        if uptime < 0:
-            uptime = 0
-
-        stats["homeserver"] = hs.config.server_name
-        stats["server_context"] = hs.config.server_context
-        stats["timestamp"] = now
-        stats["uptime_seconds"] = uptime
-        version = sys.version_info
-        stats["python_version"] = "{}.{}.{}".format(
-            version.major, version.minor, version.micro
+        return run_as_background_process(
+            "phone_stats_home", phone_stats_home, hs, stats
         )
-        stats["total_users"] = yield hs.get_datastore().count_all_users()
-
-        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
-        stats["total_nonbridged_users"] = total_nonbridged_users
-
-        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
-        for name, count in iteritems(daily_user_type_results):
-            stats["daily_user_type_" + name] = count
-
-        room_count = yield hs.get_datastore().get_room_count()
-        stats["total_room_count"] = room_count
-
-        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
-        stats[
-            "daily_active_rooms"
-        ] = yield hs.get_datastore().count_daily_active_rooms()
-        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
-
-        r30_results = yield hs.get_datastore().count_r30_users()
-        for name, count in iteritems(r30_results):
-            stats["r30_users_" + name] = count
-
-        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
-        stats["daily_sent_messages"] = daily_sent_messages
-        stats["cache_factor"] = CACHE_SIZE_FACTOR
-        stats["event_cache_size"] = hs.config.event_cache_size
-
-        if len(stats_process) > 0:
-            stats["memory_rss"] = 0
-            stats["cpu_average"] = 0
-            for process in stats_process:
-                stats["memory_rss"] += process.memory_info().rss
-                stats["cpu_average"] += int(process.cpu_percent(interval=None))
-
-        stats["database_engine"] = hs.get_datastore().database_engine_name
-        stats["database_server_version"] = hs.get_datastore().get_server_version()
-        logger.info(
-            "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
-        )
-        try:
-            yield hs.get_simple_http_client().put_json(
-                hs.config.report_stats_endpoint, stats
-            )
-        except Exception as e:
-            logger.warn("Error reporting stats: %s", e)
-
-    def performance_stats_init():
-        try:
-            process = psutil.Process()
-            # Ensure we can fetch both, and make the initial request for cpu_percent
-            # so the next request will use this as the initial point.
-            process.memory_info().rss
-            process.cpu_percent(interval=None)
-            logger.info("report_stats can use psutil")
-            stats_process.append(process)
-        except (AttributeError):
-            logger.warning("Unable to read memory/cpu stats. Disabling reporting.")

     def generate_user_daily_visit_stats():
         return run_as_background_process(
@@ -40,7 +40,6 @@ from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
 from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -120,7 +119,7 @@ class MediaRepositoryServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -129,7 +128,7 @@ class MediaRepositoryServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -157,14 +156,10 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     ss = MediaRepositoryServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
@@ -33,10 +33,10 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
@@ -45,7 +45,11 @@ logger = logging.getLogger("synapse.app.pusher")


 class PusherSlaveStore(
-    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore, SlavedAccountDataStore
+    SlavedEventStore,
+    SlavedPusherStore,
+    SlavedReceiptsStore,
+    SlavedAccountDataStore,
+    RoomStore,
 ):
     update_pusher_last_stream_ordering_and_success = __func__(
         DataStore.update_pusher_last_stream_ordering_and_success
@@ -114,7 +118,7 @@ class PusherServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -123,7 +127,7 @@ class PusherServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -198,14 +202,10 @@ def start(config_options):
     # Force the pushers to start since they will be disabled in the main config
     config.start_pushers = True

-    database_engine = create_engine(config.database_config)
-
     ps = PusherServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ps, config, use_worker_options=True)
@@ -55,7 +55,6 @@ from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
 from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
 from synapse.storage.data_stores.main.presence import UserPresenceState
-from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
 from synapse.util.stringutils import random_string
@@ -151,7 +150,7 @@ class SynchrotronPresence(object):

     def set_state(self, user, state, ignore_status_msg=False):
         # TODO Hows this supposed to work?
-        pass
+        return defer.succeed(None)

     get_states = __func__(PresenceHandler.get_states)
     get_state = __func__(PresenceHandler.get_state)
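A brief note on the pass-to-defer.succeed change: callers treat the return value of set_state as a Deferred (for instance, chaining a callback onto it), and a bare pass returns None. A small illustrative sketch, not the commit's code:

    from twisted.internet import defer

    def set_state_stub():
        # an already-fired Deferred; callbacks chained onto it run immediately
        return defer.succeed(None)

    set_state_stub().addCallback(lambda _: None)  # fine
    # returning None instead would make .addCallback(...) raise AttributeError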
@@ -326,7 +325,7 @@ class SynchrotronServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -335,7 +334,7 @@ class SynchrotronServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -437,14 +436,10 @@ def start(config_options):

     synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     ss = SynchrotronServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
         application_service_handler=SynchrotronApplicationService(),
     )

+6 -10
@@ -43,7 +43,7 @@ from synapse.replication.tcp.streams.events import (
 from synapse.rest.client.v2_alpha import user_directory
 from synapse.server import HomeServer
 from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
-from synapse.storage.engines import create_engine
+from synapse.storage.database import Database
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.manhole import manhole
@@ -60,11 +60,11 @@ class UserDirectorySlaveStore(
     UserDirectoryStore,
     BaseSlavedStore,
 ):
-    def __init__(self, db_conn, hs):
-        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)
+    def __init__(self, database: Database, db_conn, hs):
+        super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs)

         events_max = self._stream_id_gen.get_current_token()
-        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
+        curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
             db_conn,
             "current_state_delta_stream",
             entity_column="room_id",
@@ -150,7 +150,7 @@ class UserDirectoryServer(HomeServer):
             )
         elif listener["type"] == "metrics":
             if not self.get_config().enable_metrics:
-                logger.warn(
+                logger.warning(
                     (
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -159,7 +159,7 @@ class UserDirectoryServer(HomeServer):
             else:
                 _base.listen_metrics(listener["bind_addresses"], listener["port"])
         else:
-            logger.warn("Unrecognized listener type: %s", listener["type"])
+            logger.warning("Unrecognized listener type: %s", listener["type"])

         self.get_tcp_replication().start_replication(self)

@@ -199,8 +199,6 @@ def start(config_options):

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    database_engine = create_engine(config.database_config)
-
     if config.update_user_directory:
         sys.stderr.write(
             "\nThe update_user_directory must be disabled in the main synapse process"
@@ -215,10 +213,8 @@ def start(config_options):

     ss = UserDirectoryServer(
         config.server_name,
-        db_config=config.database_config,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
-        database_engine=database_engine,
     )

     setup_logging(ss, config, use_worker_options=True)
@@ -94,7 +94,9 @@ class ApplicationService(object):
         ip_range_whitelist=None,
     ):
         self.token = token
-        self.url = url
+        self.url = (
+            url.rstrip("/") if isinstance(url, str) else None
+        )  # url must not end with a slash
         self.hs_token = hs_token
         self.sender = sender
         self.server_name = hostname

@@ -185,7 +185,7 @@ class ApplicationServiceApi(SimpleHttpClient):

         if not _is_valid_3pe_metadata(info):
             logger.warning(
-                "query_3pe_protocol to %s did not return a" " valid result", uri
+                "query_3pe_protocol to %s did not return a valid result", uri
             )
             return None
@@ -134,7 +134,7 @@ def _load_appservice(hostname, as_info, config_filename):
     for regex_obj in as_info["namespaces"][ns]:
         if not isinstance(regex_obj, dict):
             raise ValueError(
-                "Expected namespace entry in %s to be an object," " but got %s",
+                "Expected namespace entry in %s to be an object, but got %s",
                 ns,
                 regex_obj,
             )
@@ -35,11 +35,11 @@ class CaptchaConfig(Config):
         ## Captcha ##
         # See docs/CAPTCHA_SETUP for full details of configuring this.

-        # This Home Server's ReCAPTCHA public key.
+        # This homeserver's ReCAPTCHA public key.
         #
         #recaptcha_public_key: "YOUR_PUBLIC_KEY"

-        # This Home Server's ReCAPTCHA private key.
+        # This homeserver's ReCAPTCHA private key.
         #
         #recaptcha_private_key: "YOUR_PRIVATE_KEY"

@@ -146,6 +146,8 @@ class EmailConfig(Config):
             if k not in email_config:
                 missing.append("email." + k)

+        # public_baseurl is required to build password reset and validation links that
+        # will be emailed to users
         if config.get("public_baseurl") is None:
             missing.append("public_baseurl")

@@ -305,8 +307,23 @@ class EmailConfig(Config):
         #   smtp_user: "exampleusername"
         #   smtp_pass: "examplepassword"
         #   require_transport_security: false
-        #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
-        #   app_name: Matrix
         #
+        #   # notif_from defines the "From" address to use when sending emails.
+        #   # It must be set if email sending is enabled.
+        #   #
+        #   # The placeholder '%(app)s' will be replaced by the application name,
+        #   # which is normally 'app_name' (below), but may be overridden by the
+        #   # Matrix client application.
+        #   #
+        #   # Note that the placeholder must be written '%(app)s', including the
+        #   # trailing 's'.
+        #   #
+        #   notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+        #
+        #   # app_name defines the default value for '%(app)s' in notif_from. It
+        #   # defaults to 'Matrix'.
+        #   #
+        #   #app_name: my_branded_matrix_server
+        #
         #   # Enable email notifications by default
         #   #
@@ -125,7 +125,7 @@ class KeyConfig(Config):

         # if neither trusted_key_servers nor perspectives are given, use the default.
         if "perspectives" not in config and "trusted_key_servers" not in config:
-            logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
+            logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
             key_servers = [{"server_name": "matrix.org"}]
         else:
             key_servers = config.get("trusted_key_servers", [])
@@ -156,7 +156,7 @@ class KeyConfig(Config):
         if not self.macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.
-            logger.warn("Config is missing macaroon_secret_key")
+            logger.warning("Config is missing macaroon_secret_key")
             seed = bytes(self.signing_key[0])
             self.macaroon_secret_key = hashlib.sha256(seed).digest()

@@ -182,7 +182,7 @@ def _reload_stdlib_logging(*args, log_config=None):
     logger = logging.getLogger("")

     if not log_config:
-        logger.warn("Reloaded a blank config?")
+        logger.warning("Reloaded a blank config?")

     logging.config.dictConfig(log_config)

@@ -234,8 +234,8 @@ def setup_logging(

     # make sure that the first thing we log is a thing we can grep backwards
     # for
-    logging.warn("***** STARTING SERVER *****")
-    logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
+    logging.warning("***** STARTING SERVER *****")
+    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
     logging.info("Server hostname: %s", config.server_name)

     return logger
@@ -106,6 +106,13 @@ class RegistrationConfig(Config):
         account_threepid_delegates = config.get("account_threepid_delegates") or {}
         self.account_threepid_delegate_email = account_threepid_delegates.get("email")
         self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
+        if self.account_threepid_delegate_msisdn and not self.public_baseurl:
+            raise ConfigError(
+                "The configuration option `public_baseurl` is required if "
+                "`account_threepid_delegate.msisdn` is set, such that "
+                "clients know where to submit validation tokens to. Please "
+                "configure `public_baseurl`."
+            )

         self.default_identity_server = config.get("default_identity_server")
         self.allow_guest_access = config.get("allow_guest_access", False)
@@ -300,7 +307,7 @@ class RegistrationConfig(Config):
        # If a delegate is specified, the config option public_baseurl must also be filled out.
        #
        account_threepid_delegates:
-            #email: https://example.com     # Delegate email sending to example.org
+            #email: https://example.com     # Delegate email sending to example.com
            #msisdn: http://localhost:8090  # Delegate SMS sending to this local process

        # Users who register on this homeserver will automatically be joined
@@ -170,7 +170,7 @@ class _RoomDirectoryRule(object):
             self.action = action
         else:
             raise ConfigError(
-                "%s rules can only have action of 'allow'" " or 'deny'" % (option_name,)
+                "%s rules can only have action of 'allow' or 'deny'" % (option_name,)
             )

         self._alias_matches_all = alias == "*"
+123 -57
@@ -14,17 +14,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 import re
+import logging

 from synapse.python_dependencies import DependencyException, check_requirements
 from synapse.types import (
     map_username_to_mxid_localpart,
     mxid_localpart_allowed_characters,
 )
-from synapse.util.module_loader import load_python_module
+from synapse.util.module_loader import load_module, load_python_module

 from ._base import Config, ConfigError

+logger = logging.getLogger(__name__)
+
+DEFAULT_USER_MAPPING_PROVIDER = (
+    "synapse.handlers.saml_handler.DefaultSamlMappingProvider"
+)
+

 def _dict_merge(merge_dict, into_dict):
     """Do a deep merge of two dicts
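The body of _dict_merge is elided by the diff view; a minimal sketch consistent with its docstring (nested dicts merged recursively, other values from merge_dict winning) might look like:

    def dict_merge_sketch(merge_dict, into_dict):
        """Deep-merge merge_dict into into_dict (illustrative only)."""
        for key, value in merge_dict.items():
            if isinstance(value, dict) and isinstance(into_dict.get(key), dict):
                dict_merge_sketch(value, into_dict[key])
            else:
                into_dict[key] = value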
@@ -75,15 +77,69 @@ class SAML2Config(Config):

         self.saml2_enabled = True

-        self.saml2_mxid_source_attribute = saml2_config.get(
-            "mxid_source_attribute", "uid"
-        )
-
         self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
             "grandfathered_mxid_source_attribute", "uid"
         )

-        saml2_config_dict = self._default_saml_config_dict()
+        # user_mapping_provider may be None if the key is present but has no value
+        ump_dict = saml2_config.get("user_mapping_provider") or {}
+
+        # Use the default user mapping provider if not set
+        ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
+
+        # Ensure a config is present
+        ump_dict["config"] = ump_dict.get("config") or {}
+
+        if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER:
+            # Load deprecated options for use by the default module
+            old_mxid_source_attribute = saml2_config.get("mxid_source_attribute")
+            if old_mxid_source_attribute:
+                logger.warning(
+                    "The config option saml2_config.mxid_source_attribute is deprecated. "
+                    "Please use saml2_config.user_mapping_provider.config"
+                    ".mxid_source_attribute instead."
+                )
+                ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute
+
+            old_mxid_mapping = saml2_config.get("mxid_mapping")
+            if old_mxid_mapping:
+                logger.warning(
+                    "The config option saml2_config.mxid_mapping is deprecated. Please "
+                    "use saml2_config.user_mapping_provider.config.mxid_mapping instead."
+                )
+                ump_dict["config"]["mxid_mapping"] = old_mxid_mapping
+
+        # Retrieve an instance of the module's class
+        # Pass the config dictionary to the module for processing
+        (
+            self.saml2_user_mapping_provider_class,
+            self.saml2_user_mapping_provider_config,
+        ) = load_module(ump_dict)
+
+        # Ensure loaded user mapping module has defined all necessary methods
+        # Note parse_config() is already checked during the call to load_module
+        required_methods = [
+            "get_saml_attributes",
+            "saml_response_to_user_attributes",
+        ]
+        missing_methods = [
+            method
+            for method in required_methods
+            if not hasattr(self.saml2_user_mapping_provider_class, method)
+        ]
+        if missing_methods:
+            raise ConfigError(
+                "Class specified by saml2_config."
+                "user_mapping_provider.module is missing required "
+                "methods: %s" % (", ".join(missing_methods),)
+            )
+
+        # Get the desired saml auth response attributes from the module
+        saml2_config_dict = self._default_saml_config_dict(
+            *self.saml2_user_mapping_provider_class.get_saml_attributes(
+                self.saml2_user_mapping_provider_config
+            )
+        )
         _dict_merge(
             merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict
         )
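The required_methods check above implies an interface along the following lines. This is a hypothetical skeleton for illustration only; the authoritative interface is whatever DefaultSamlMappingProvider implements, and the saml_response_to_user_attributes signature here is assumed.

    class ExampleSamlMappingProvider:
        """Hypothetical user mapping provider satisfying the checks above."""

        def __init__(self, parsed_config):
            self._config = parsed_config

        @staticmethod
        def parse_config(config):
            # validated via load_module(), per the comment in the diff above
            return config

        @staticmethod
        def get_saml_attributes(config):
            # (required, optional) SAML attributes used to build the SP config
            return {"uid"}, {"displayName"}

        def saml_response_to_user_attributes(self, saml_response, failures):
            # map the SAML assertion onto Matrix user attributes
            raise NotImplementedError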
@@ -103,22 +159,27 @@ class SAML2Config(Config):
             saml2_config.get("saml_session_lifetime", "5m")
         )

-        mapping = saml2_config.get("mxid_mapping", "hexencode")
-        try:
-            self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping]
-        except KeyError:
-            raise ConfigError("%s is not a known mxid_mapping" % (mapping,))
+    def _default_saml_config_dict(
+        self, required_attributes: set, optional_attributes: set
+    ):
+        """Generate a configuration dictionary with required and optional attributes that
+        will be needed to process new user registration

-    def _default_saml_config_dict(self):
+        Args:
+            required_attributes: SAML auth response attributes that are
+                necessary to function
+            optional_attributes: SAML auth response attributes that can be used to add
+                additional information to Synapse user accounts, but are not required
+
+        Returns:
+            dict: A SAML configuration dictionary
+        """
         import saml2

         public_baseurl = self.public_baseurl
         if public_baseurl is None:
             raise ConfigError("saml2_config requires a public_baseurl to be set")

-        required_attributes = {"uid", self.saml2_mxid_source_attribute}
-
-        optional_attributes = {"displayName"}
         if self.saml2_grandfathered_mxid_source_attribute:
             optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
         optional_attributes -= required_attributes
@@ -207,33 +268,58 @@ class SAML2Config(Config):
          #
          #config_path: "%(config_dir_path)s/sp_conf.py"

-         # the lifetime of a SAML session. This defines how long a user has to
+         # The lifetime of a SAML session. This defines how long a user has to
          # complete the authentication process, if allow_unsolicited is unset.
          # The default is 5 minutes.
          #
          #saml_session_lifetime: 5m

-         # The SAML attribute (after mapping via the attribute maps) to use to derive
-         # the Matrix ID from. 'uid' by default.
+         # An external module can be provided here as a custom solution to
+         # mapping attributes returned from a saml provider onto a matrix user.
          #
-         #mxid_source_attribute: displayName
+         user_mapping_provider:
+           # The custom module's class. Uncomment to use a custom module.
+           #
+           #module: mapping_provider.SamlMappingProvider

-         # The mapping system to use for mapping the saml attribute onto a matrix ID.
-         # Options include:
-         # * 'hexencode' (which maps unpermitted characters to '=xx')
-         # * 'dotreplace' (which replaces unpermitted characters with '.').
-         # The default is 'hexencode'.
-         #
-         #mxid_mapping: dotreplace
+           # Custom configuration values for the module. Below options are
+           # intended for the built-in provider, they should be changed if
+           # using a custom module. This section will be passed as a Python
+           # dictionary to the module's `parse_config` method.
+           #
+           config:
+             # The SAML attribute (after mapping via the attribute maps) to use
+             # to derive the Matrix ID from. 'uid' by default.
+             #
+             # Note: This used to be configured by the
+             # saml2_config.mxid_source_attribute option. If that is still
+             # defined, its value will be used instead.
+             #
+             #mxid_source_attribute: displayName

-         # In previous versions of synapse, the mapping from SAML attribute to MXID was
-         # always calculated dynamically rather than stored in a table. For backwards-
-         # compatibility, we will look for user_ids matching such a pattern before
-         # creating a new account.
+             # The mapping system to use for mapping the saml attribute onto a
+             # matrix ID.
+             #
+             # Options include:
+             #  * 'hexencode' (which maps unpermitted characters to '=xx')
+             #  * 'dotreplace' (which replaces unpermitted characters with
+             #     '.').
+             # The default is 'hexencode'.
+             #
+             # Note: This used to be configured by the
+             # saml2_config.mxid_mapping option. If that is still defined, its
+             # value will be used instead.
+             #
+             #mxid_mapping: dotreplace

+             # In previous versions of synapse, the mapping from SAML attribute to
+             # MXID was always calculated dynamically rather than stored in a
+             # table. For backwards- compatibility, we will look for user_ids
+             # matching such a pattern before creating a new account.
              #
              # This setting controls the SAML attribute which will be used for this
-             # backwards-compatibility lookup. Typically it should be 'uid', but if the
-             # attribute maps are changed, it may be necessary to change it.
+             # backwards-compatibility lookup. Typically it should be 'uid', but if
+             # the attribute maps are changed, it may be necessary to change it.
              #
              # The default is 'uid'.
              #
@@ -241,23 +327,3 @@ class SAML2Config(Config):
         """ % {
             "config_dir_path": config_dir_path
         }
-
-
-DOT_REPLACE_PATTERN = re.compile(
-    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
-)
-
-
-def dot_replace_for_mxid(username: str) -> str:
-    username = username.lower()
-    username = DOT_REPLACE_PATTERN.sub(".", username)
-
-    # regular mxids aren't allowed to start with an underscore either
-    username = re.sub("^_", "", username)
-    return username
-
-
-MXID_MAPPER_MAP = {
-    "hexencode": map_username_to_mxid_localpart,
-    "dotreplace": dot_replace_for_mxid,
-}
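Example behaviour of dot_replace_for_mxid, which this diff removes from the config module: the input is lower-cased, characters outside the allowed localpart set become '.', and a leading underscore is dropped. A self-contained sketch (the allowed-character set here is an approximation of mxid_localpart_allowed_characters):

    import re

    allowed = "abcdefghijklmnopqrstuvwxyz0123456789._=-/"
    pattern = re.compile("[^%s]" % re.escape(allowed))

    def dot_replace(username):
        username = pattern.sub(".", username.lower())
        return re.sub("^_", "", username)

    assert dot_replace("Bob Smith") == "bob.smith"
    assert dot_replace("_admin!") == "admin."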
+204 -19
@@ -19,7 +19,7 @@ import logging
 import os.path
 import re
 from textwrap import indent
-from typing import List
+from typing import Dict, List, Optional

 import attr
 import yaml
@@ -41,7 +41,7 @@ logger = logging.Logger(__name__)
 # in the list.
 DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]

-DEFAULT_ROOM_VERSION = "4"
+DEFAULT_ROOM_VERSION = "5"

 ROOM_COMPLEXITY_TOO_GREAT = (
     "Your homeserver is unable to join rooms this large or complex. "
@@ -118,15 +118,16 @@ class ServerConfig(Config):
             self.allow_public_rooms_without_auth = False
             self.allow_public_rooms_over_federation = False
         else:
-            # If set to 'False', requires authentication to access the server's public
-            # rooms directory through the client API. Defaults to 'True'.
+            # If set to 'true', removes the need for authentication to access the server's
+            # public rooms directory through the client API, meaning that anyone can
+            # query the room directory. Defaults to 'false'.
             self.allow_public_rooms_without_auth = config.get(
-                "allow_public_rooms_without_auth", True
+                "allow_public_rooms_without_auth", False
             )
-            # If set to 'False', forbids any other homeserver to fetch the server's public
-            # rooms directory via federation. Defaults to 'True'.
+            # If set to 'true', allows any other homeserver to fetch the server's public
+            # rooms directory via federation. Defaults to 'false'.
             self.allow_public_rooms_over_federation = config.get(
-                "allow_public_rooms_over_federation", True
+                "allow_public_rooms_over_federation", False
             )

         default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
@@ -223,7 +224,7 @@ class ServerConfig(Config):
             self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
         except Exception as e:
             raise ConfigError(
-                "Invalid range(s) provided in " "federation_ip_range_blacklist: %s" % e
+                "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
             )

         if self.public_baseurl is not None:
@@ -246,6 +247,124 @@ class ServerConfig(Config):
         # events with profile information that differ from the target's global profile.
         self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

+        retention_config = config.get("retention")
+        if retention_config is None:
+            retention_config = {}
+
+        self.retention_enabled = retention_config.get("enabled", False)
+
+        retention_default_policy = retention_config.get("default_policy")
+
+        if retention_default_policy is not None:
+            self.retention_default_min_lifetime = retention_default_policy.get(
+                "min_lifetime"
+            )
+            if self.retention_default_min_lifetime is not None:
+                self.retention_default_min_lifetime = self.parse_duration(
+                    self.retention_default_min_lifetime
+                )
+
+            self.retention_default_max_lifetime = retention_default_policy.get(
+                "max_lifetime"
+            )
+            if self.retention_default_max_lifetime is not None:
+                self.retention_default_max_lifetime = self.parse_duration(
+                    self.retention_default_max_lifetime
+                )
+
+            if (
+                self.retention_default_min_lifetime is not None
+                and self.retention_default_max_lifetime is not None
+                and (
+                    self.retention_default_min_lifetime
+                    > self.retention_default_max_lifetime
+                )
+            ):
+                raise ConfigError(
+                    "The default retention policy's 'min_lifetime' can not be greater"
+                    " than its 'max_lifetime'"
+                )
+        else:
+            self.retention_default_min_lifetime = None
+            self.retention_default_max_lifetime = None
+
+        self.retention_allowed_lifetime_min = retention_config.get(
+            "allowed_lifetime_min"
+        )
+        if self.retention_allowed_lifetime_min is not None:
+            self.retention_allowed_lifetime_min = self.parse_duration(
+                self.retention_allowed_lifetime_min
+            )
+
+        self.retention_allowed_lifetime_max = retention_config.get(
+            "allowed_lifetime_max"
+        )
+        if self.retention_allowed_lifetime_max is not None:
+            self.retention_allowed_lifetime_max = self.parse_duration(
+                self.retention_allowed_lifetime_max
+            )
+
+        if (
+            self.retention_allowed_lifetime_min is not None
+            and self.retention_allowed_lifetime_max is not None
+            and self.retention_allowed_lifetime_min
+            > self.retention_allowed_lifetime_max
+        ):
+            raise ConfigError(
+                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
+                " greater than 'allowed_lifetime_max'"
+            )
+
+        self.retention_purge_jobs = []  # type: List[Dict[str, Optional[int]]]
+        for purge_job_config in retention_config.get("purge_jobs", []):
+            interval_config = purge_job_config.get("interval")
+
+            if interval_config is None:
+                raise ConfigError(
+                    "A retention policy's purge jobs configuration must have the"
+                    " 'interval' key set."
+                )
+
+            interval = self.parse_duration(interval_config)
+
+            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
+
+            if shortest_max_lifetime is not None:
+                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
+
+            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
+
+            if longest_max_lifetime is not None:
+                longest_max_lifetime = self.parse_duration(longest_max_lifetime)
+
+            if (
+                shortest_max_lifetime is not None
+                and longest_max_lifetime is not None
+                and shortest_max_lifetime > longest_max_lifetime
+            ):
+                raise ConfigError(
+                    "A retention policy's purge jobs configuration's"
+                    " 'shortest_max_lifetime' value can not be greater than its"
+                    " 'longest_max_lifetime' value."
+                )
+
+            self.retention_purge_jobs.append(
+                {
+                    "interval": interval,
+                    "shortest_max_lifetime": shortest_max_lifetime,
+                    "longest_max_lifetime": longest_max_lifetime,
+                }
+            )
+
+        if not self.retention_purge_jobs:
+            self.retention_purge_jobs = [
+                {
+                    "interval": self.parse_duration("1d"),
+                    "shortest_max_lifetime": None,
+                    "longest_max_lifetime": None,
+                }
+            ]
+
         self.listeners = []  # type: List[dict]
         for listener in config.get("listeners", []):
             if not isinstance(listener.get("port", None), int):
@@ -372,6 +491,8 @@ class ServerConfig(Config):
             "cleanup_extremities_with_dummy_events", True
         )

+        self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
+
     def has_tls_listener(self) -> bool:
         return any(l["tls"] for l in self.listeners)

@@ -500,15 +621,16 @@ class ServerConfig(Config):
         #
         #require_auth_for_profile_requests: true

-        # If set to 'false', requires authentication to access the server's public rooms
-        # directory through the client API. Defaults to 'true'.
+        # If set to 'true', removes the need for authentication to access the server's
+        # public rooms directory through the client API, meaning that anyone can
+        # query the room directory. Defaults to 'false'.
         #
-        #allow_public_rooms_without_auth: false
+        #allow_public_rooms_without_auth: true

-        # If set to 'false', forbids any other homeserver to fetch the server's public
-        # rooms directory via federation. Defaults to 'true'.
+        # If set to 'true', allows any other homeserver to fetch the server's public
+        # rooms directory via federation. Defaults to 'false'.
         #
-        #allow_public_rooms_over_federation: false
+        #allow_public_rooms_over_federation: true

         # The default room version for newly created rooms.
         #
@@ -721,7 +843,7 @@ class ServerConfig(Config):
         # Used by phonehome stats to group together related servers.
         #server_context: context

-        # Resource-constrained Homeserver Settings
+        # Resource-constrained homeserver Settings
         #
         # If limit_remote_rooms.enabled is True, the room complexity will be
         # checked before a user joins a new remote room. If it is above
@@ -761,6 +883,69 @@ class ServerConfig(Config):
|
||||
# Defaults to `28d`. Set to `null` to disable clearing out of old rows.
|
||||
#
|
||||
#user_ips_max_age: 14d
|
||||
|
||||
# Message retention policy at the server level.
|
||||
#
|
||||
# Room admins and mods can define a retention period for their rooms using the
|
||||
# 'm.room.retention' state event, and server admins can cap this period by setting
|
||||
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
|
||||
#
|
||||
# If this feature is enabled, Synapse will regularly look for and purge events
|
||||
# which are older than the room's maximum retention period. Synapse will also
|
||||
# filter events received over federation so that events that should have been
|
||||
# purged are ignored and not stored again.
|
||||
#
|
||||
retention:
|
||||
# The message retention policies feature is disabled by default. Uncomment the
|
||||
# following line to enable it.
|
||||
#
|
||||
#enabled: true
|
||||
|
||||
# Default retention policy. If set, Synapse will apply it to rooms that lack the
|
||||
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
|
||||
# matter much because Synapse doesn't take it into account yet.
|
||||
#
|
||||
#default_policy:
|
||||
# min_lifetime: 1d
|
||||
# max_lifetime: 1y
|
||||
|
||||
# Retention policy limits. If set, a user won't be able to send a
|
||||
# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
|
||||
# that's not within this range. This is especially useful in closed federations,
|
||||
# in which server admins can make sure every federating server applies the same
|
||||
# rules.
|
||||
#
|
||||
#allowed_lifetime_min: 1d
|
||||
#allowed_lifetime_max: 1y
|
||||
|
||||
# Server admins can define the settings of the background jobs purging the
|
||||
# events which lifetime has expired under the 'purge_jobs' section.
|
||||
#
|
||||
# If no configuration is provided, a single job will be set up to delete expired
|
||||
# events in every room daily.
|
||||
#
|
||||
# Each job's configuration defines which range of message lifetimes the job
|
||||
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
|
||||
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
|
||||
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
|
||||
# lower than or equal to 3 days. Both the minimum and the maximum value of a
|
||||
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
|
||||
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
|
||||
# which 'max_lifetime' is lower than or equal to three days.
|
||||
#
|
||||
# The rationale for this per-job configuration is that some rooms might have a
|
||||
# retention policy with a low 'max_lifetime', where history needs to be purged
|
||||
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
|
||||
# that purge to be performed by a job that's iterating over every room it knows,
|
||||
# which would be quite heavy on the server.
|
||||
#
|
||||
#purge_jobs:
|
||||
# - shortest_max_lifetime: 1d
|
||||
# longest_max_lifetime: 3d
|
||||
# interval: 5m:
|
||||
# - shortest_max_lifetime: 3d
|
||||
# longest_max_lifetime: 1y
|
||||
# interval: 24h
|
||||
"""
|
||||
% locals()
|
||||
)
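To make the purge-job ranges described in the sample config concrete, here is a hedged sketch (not code from this changeset) of the containment rule: a job covers a room when the room's 'max_lifetime' is strictly greater than the job's 'shortest_max_lifetime' and at most its 'longest_max_lifetime', with a missing bound treated as unbounded.

```python
# Sketch of the range semantics documented above; values are in
# milliseconds, and None means the bound is absent.
def job_covers_room(shortest_max_lifetime, longest_max_lifetime, room_max_lifetime):
    if shortest_max_lifetime is not None and room_max_lifetime <= shortest_max_lifetime:
        return False  # the room's policy is too short-lived for this job
    if longest_max_lifetime is not None and room_max_lifetime > longest_max_lifetime:
        return False  # the room's policy outlives this job's range
    return True

# e.g. a job with bounds (2d, 3d] covers a room whose max_lifetime is 3d:
assert job_covers_room(2 * 86400000, 3 * 86400000, 3 * 86400000)
```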
@@ -781,20 +966,20 @@ class ServerConfig(Config):
            "--daemonize",
            action="store_true",
            default=None,
            help="Daemonize the home server",
            help="Daemonize the homeserver",
        )
        server_group.add_argument(
            "--print-pidfile",
            action="store_true",
            default=None,
            help="Print the path to the pidfile just" " before daemonizing",
            help="Print the path to the pidfile just before daemonizing",
        )
        server_group.add_argument(
            "--manhole",
            metavar="PORT",
            dest="manhole",
            type=int,
            help="Turn on the twisted telnet manhole" " service on the given port.",
            help="Turn on the twisted telnet manhole service on the given port.",
        )

@@ -125,9 +125,11 @@ def compute_event_signature(event_dict, signature_name, signing_key):
    redact_json = prune_event_dict(event_dict)
    redact_json.pop("age_ts", None)
    redact_json.pop("unsigned", None)
    logger.debug("Signing event: %s", encode_canonical_json(redact_json))
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signing event: %s", encode_canonical_json(redact_json))
    redact_json = sign_json(redact_json, signature_name, signing_key)
    logger.debug("Signed event: %s", encode_canonical_json(redact_json))
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Signed event: %s", encode_canonical_json(redact_json))
    return redact_json["signatures"]
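The guard added above matters because logging *arguments* are evaluated eagerly: `encode_canonical_json` would run even when DEBUG output is discarded. A minimal self-contained sketch of the pattern:

```python
import json
import logging

logger = logging.getLogger(__name__)

def log_payload(payload: dict) -> None:
    # Without the guard, json.dumps() runs on every call; with it, the
    # serialisation cost is only paid when DEBUG logging is enabled.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("payload: %s", json.dumps(payload, sort_keys=True))
```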

@@ -42,6 +42,8 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
    Returns:
        if the auth checks pass.
    """
    assert isinstance(auth_events, dict)

    if do_size_check:
        _check_size_limits(event)

@@ -74,12 +76,6 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
        if not event.signatures.get(event_id_domain):
            raise AuthError(403, "Event not signed by sending server")

    if auth_events is None:
        # Oh, we don't know what the state of the room was, so we
        # are trusting that this is allowed (at least for now)
        logger.warn("Trusting event: %s", event.event_id)
        return

    if event.type == EventTypes.Create:
        sender_domain = get_domain_from_id(event.sender)
        room_id_domain = get_domain_from_id(event.room_id)

@@ -12,104 +12,125 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple, Union

from six import iteritems

import attr
from frozendict import frozendict

from twisted.internet import defer

from synapse.appservice import ApplicationService
from synapse.logging.context import make_deferred_yieldable, run_in_background


class EventContext(object):
@attr.s(slots=True)
class EventContext:
    """
    Holds information relevant to persisting an event

    Attributes:
        state_group (int|None): state group id, if the state has been stored
            as a state group. This is usually only None if e.g. the event is
            an outlier.
        rejected (bool|str): A rejection reason if the event was rejected, else
            False
        rejected: A rejection reason if the event was rejected, else False

        push_actions (list[(str, list[object])]): list of (user_id, actions)
            tuples
        _state_group: The ID of the state group for this event. Note that state events
            are persisted with a state group which includes the new event, so this is
            effectively the state *after* the event in question.

        prev_group (int): Previously persisted state group. ``None`` for an
            outlier.
        delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
            (type, state_key) -> event_id. ``None`` for an outlier.
            For a *rejected* state event, where the state of the rejected event is
            ignored, this state_group should never make it into the
            event_to_state_groups table. Indeed, inspecting this value for a rejected
            state event is almost certainly incorrect.

        prev_state_events (?): XXX: is this ever set to anything other than
            the empty list?
            For an outlier, where we don't have the state at the event, this will be
            None.

            Note that this is a private attribute: it should be accessed via
            the ``state_group`` property.

        state_group_before_event: The ID of the state group representing the state
            of the room before this event.

            If this is a non-state event, this will be the same as ``state_group``. If
            it's a state event, it will be the same as ``prev_group``.

            If ``state_group`` is None (ie, the event is an outlier),
            ``state_group_before_event`` will always also be ``None``.

        prev_group: If it is known, ``state_group``'s prev_group. Note that this being
            None does not necessarily mean that ``state_group`` does not have
            a prev_group!

            If the event is a state event, this is normally the same as ``prev_group``.

            If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
            will always also be ``None``.

            Note that this is *not* (necessarily) the state group associated with
            ``_prev_state_ids``.

        delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
            and ``state_group``.

        app_service: If this event is being sent by a (local) application service, that
            app service.

        _current_state_ids: The room state map, including this event - ie, the state
            in ``state_group``.

        _current_state_ids (dict[(str, str), str]|None):
            The current state map including the current event. None if outlier
            or we haven't fetched the state from DB yet.
            (type, state_key) -> event_id

        _prev_state_ids (dict[(str, str), str]|None):
            The current state map excluding the current event. None if outlier
            or we haven't fetched the state from DB yet.
            FIXME: what is this for an outlier? it seems ill-defined. It seems like
            it could be either {}, or the state we were given by the remote
            server, depending on $THINGS

            Note that this is a private attribute: it should be accessed via
            ``get_current_state_ids``. _AsyncEventContext impl calculates this
            on-demand: it will be None until that happens.

        _prev_state_ids: The room state map, excluding this event - ie, the state
            in ``state_group_before_event``. For a non-state
            event, this will be the same as _current_state_events.

            Note that it is a completely different thing to prev_group!

            (type, state_key) -> event_id

        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
            been calculated. None if we haven't started calculating yet
            FIXME: again, what is this for an outlier?

        _event_type (str): The type of the event the context is associated with.
            Only set when state has not been fetched yet.

        _event_state_key (str|None): The state_key of the event the context is
            associated with. Only set when state has not been fetched yet.

        _prev_state_id (str|None): If the event associated with the context is
            a state event, then `_prev_state_id` is the event_id of the state
            that was replaced.
            Only set when state has not been fetched yet.
            As with _current_state_ids, this is a private attribute. It should be
            accessed via get_prev_state_ids.
    """

    __slots__ = [
        "state_group",
        "rejected",
        "prev_group",
        "delta_ids",
        "prev_state_events",
        "app_service",
        "_current_state_ids",
        "_prev_state_ids",
        "_prev_state_id",
        "_event_type",
        "_event_state_key",
        "_fetching_state_deferred",
    ]
    rejected = attr.ib(default=False, type=Union[bool, str])
    _state_group = attr.ib(default=None, type=Optional[int])
    state_group_before_event = attr.ib(default=None, type=Optional[int])
    prev_group = attr.ib(default=None, type=Optional[int])
    delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
    app_service = attr.ib(default=None, type=Optional[ApplicationService])

    def __init__(self):
        self.prev_state_events = []
        self.rejected = False
        self.app_service = None
    _current_state_ids = attr.ib(
        default=None, type=Optional[Dict[Tuple[str, str], str]]
    )
    _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])

    @staticmethod
    def with_state(
        state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
        state_group,
        state_group_before_event,
        current_state_ids,
        prev_state_ids,
        prev_group=None,
        delta_ids=None,
    ):
        context = EventContext()

        # The current state including the current event
        context._current_state_ids = current_state_ids
        # The current state excluding the current event
        context._prev_state_ids = prev_state_ids
        context.state_group = state_group

        context._prev_state_id = None
        context._event_type = None
        context._event_state_key = None
        context._fetching_state_deferred = defer.succeed(None)

        # A previously persisted state group and a delta between that
        # and this state.
        context.prev_group = prev_group
        context.delta_ids = delta_ids

        return context
        return EventContext(
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
            state_group=state_group,
            state_group_before_event=state_group_before_event,
            prev_group=prev_group,
            delta_ids=delta_ids,
        )

    @defer.inlineCallbacks
    def serialize(self, event, store):
@@ -137,11 +158,11 @@ class EventContext(object):
            "prev_state_id": prev_state_id,
            "event_type": event.type,
            "event_state_key": event.state_key if event.is_state() else None,
            "state_group": self.state_group,
            "state_group": self._state_group,
            "state_group_before_event": self.state_group_before_event,
            "rejected": self.rejected,
            "prev_group": self.prev_group,
            "delta_ids": _encode_state_dict(self.delta_ids),
            "prev_state_events": self.prev_state_events,
            "app_service_id": self.app_service.id if self.app_service else None,
        }

@@ -157,24 +178,18 @@ class EventContext(object):
        Returns:
            EventContext
        """
        context = EventContext()

        # We use the state_group and prev_state_id stuff to pull the
        # current_state_ids out of the DB and construct prev_state_ids.
        context._prev_state_id = input["prev_state_id"]
        context._event_type = input["event_type"]
        context._event_state_key = input["event_state_key"]

        context._current_state_ids = None
        context._prev_state_ids = None
        context._fetching_state_deferred = None

        context.state_group = input["state_group"]
        context.prev_group = input["prev_group"]
        context.delta_ids = _decode_state_dict(input["delta_ids"])

        context.rejected = input["rejected"]
        context.prev_state_events = input["prev_state_events"]
        context = _AsyncEventContextImpl(
            # We use the state_group and prev_state_id stuff to pull the
            # current_state_ids out of the DB and construct prev_state_ids.
            prev_state_id=input["prev_state_id"],
            event_type=input["event_type"],
            event_state_key=input["event_state_key"],
            state_group=input["state_group"],
            state_group_before_event=input["state_group_before_event"],
            prev_group=input["prev_group"],
            delta_ids=_decode_state_dict(input["delta_ids"]),
            rejected=input["rejected"],
        )

        app_service_id = input["app_service_id"]
        if app_service_id:
@@ -182,29 +197,52 @@ class EventContext(object):

        return context

    @property
    def state_group(self) -> Optional[int]:
        """The ID of the state group for this event.

        Note that state events are persisted with a state group which includes the new
        event, so this is effectively the state *after* the event in question.

        For an outlier, where we don't have the state at the event, this will be None.

        It is an error to access this for a rejected event, since rejected state should
        not make it into the room state. Accessing this property will raise an exception
        if ``rejected`` is set.
        """
        if self.rejected:
            raise RuntimeError("Attempt to access state_group of rejected event")

        return self._state_group
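A hedged usage sketch of the property above (relying only on the attrs-generated keyword constructor shown earlier): once `rejected` is set, reading `state_group` fails loudly rather than leaking rejected state.

```python
# Sketch: attrs maps the private _state_group attribute to a `state_group`
# constructor argument; the property refuses access for rejected events.
ctx = EventContext(rejected="rejected: failed auth checks")
try:
    ctx.state_group
except RuntimeError:
    pass  # rejected state must never reach event_to_state_groups
```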

    @defer.inlineCallbacks
    def get_current_state_ids(self, store):
        """Gets the current state IDs
        """
        Gets the room state map, including this event - ie, the state in ``state_group``

        It is an error to access this for a rejected event, since rejected state should
        not make it into the room state. This method will raise an exception if
        ``rejected`` is set.

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
                is None, which happens when the associated event is an outlier.

                Maps a (type, state_key) to the event ID of the state event matching
                this tuple.
        """
        if self.rejected:
            raise RuntimeError("Attempt to access state_ids of rejected event")

        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store
            )

        yield make_deferred_yieldable(self._fetching_state_deferred)

        yield self._ensure_fetched(store)
        return self._current_state_ids

    @defer.inlineCallbacks
    def get_prev_state_ids(self, store):
        """Gets the prev state IDs
        """
        Gets the room state map, excluding this event.

        For a non-state event, this will be the same as get_current_state_ids().

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
@@ -212,27 +250,64 @@ class EventContext(object):
                Maps a (type, state_key) to the event ID of the state event matching
                this tuple.
        """

        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store
            )

        yield make_deferred_yieldable(self._fetching_state_deferred)

        yield self._ensure_fetched(store)
        return self._prev_state_ids

    def get_cached_current_state_ids(self):
        """Gets the current state IDs if we have them already cached.

        It is an error to access this for a rejected event, since rejected state should
        not make it into the room state. This method will raise an exception if
        ``rejected`` is set.

        Returns:
            dict[(str, str), str]|None: Returns None if we haven't cached the
            state or if state_group is None, which happens when the associated
            event is an outlier.
        """
        if self.rejected:
            raise RuntimeError("Attempt to access state_ids of rejected event")

        return self._current_state_ids

    def _ensure_fetched(self, store):
        return defer.succeed(None)


@attr.s(slots=True)
class _AsyncEventContextImpl(EventContext):
    """
    An implementation of EventContext which fetches _current_state_ids and
    _prev_state_ids from the database on demand.

    Attributes:

        _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
            been calculated. None if we haven't started calculating yet

        _event_type (str): The type of the event the context is associated with.

        _event_state_key (str): The state_key of the event the context is
            associated with.

        _prev_state_id (str|None): If the event associated with the context is
            a state event, then `_prev_state_id` is the event_id of the state
            that was replaced.
    """

    _prev_state_id = attr.ib(default=None)
    _event_type = attr.ib(default=None)
    _event_state_key = attr.ib(default=None)
    _fetching_state_deferred = attr.ib(default=None)

    def _ensure_fetched(self, store):
        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store
            )

        return make_deferred_yieldable(self._fetching_state_deferred)

    @defer.inlineCallbacks
    def _fill_out_state(self, store):
        """Called to populate the _current_state_ids and _prev_state_ids
@@ -250,27 +325,6 @@ class EventContext(object):
        else:
            self._prev_state_ids = self._current_state_ids

    @defer.inlineCallbacks
    def update_state(
        self, state_group, prev_state_ids, current_state_ids, prev_group, delta_ids
    ):
        """Replace the state in the context
        """

        # We need to make sure we wait for any ongoing fetching of state
        # to complete so that the updated state doesn't get clobbered
        if self._fetching_state_deferred:
            yield make_deferred_yieldable(self._fetching_state_deferred)

        self.state_group = state_group
        self._prev_state_ids = prev_state_ids
        self.prev_group = prev_group
        self._current_state_ids = current_state_ids
        self.delta_ids = delta_ids

        # We need to ensure that we've marked as having fetched the state
        self._fetching_state_deferred = defer.succeed(None)


def _encode_state_dict(state_dict):
    """Since dicts of (type, state_key) -> event_id cannot be serialized in

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect

from synapse.spam_checker_api import SpamCheckerApi


class SpamChecker(object):
    def __init__(self, hs):
@@ -26,7 +31,14 @@ class SpamChecker(object):
            pass

        if module is not None:
            self.spam_checker = module(config=config)
            # Older spam checkers don't accept the `api` argument, so we
            # try and detect support.
            spam_args = inspect.getfullargspec(module)
            if "api" in spam_args.args:
                api = SpamCheckerApi(hs)
                self.spam_checker = module(config=config, api=api)
            else:
                self.spam_checker = module(config=config)
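A hedged sketch of the detection above (`LegacyChecker`/`ModernChecker` are made-up spam checker classes): for a class, `inspect.getfullargspec` reports the `__init__` parameters, including `self`, so testing membership of `"api"` in `.args` distinguishes the two constructor shapes.

```python
import inspect

class LegacyChecker:
    def __init__(self, config):  # no `api` parameter
        self.config = config

class ModernChecker:
    def __init__(self, config, api):
        self.config, self.api = config, api

assert "api" not in inspect.getfullargspec(LegacyChecker).args
assert "api" in inspect.getfullargspec(ModernChecker).args
```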

    def check_event_for_spam(self, event):
        """Checks if a given event is considered "spammy" by this server.

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from six import string_types
from six import integer_types, string_types

from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -22,11 +22,12 @@ from synapse.types import EventID, RoomID, UserID


class EventValidator(object):
    def validate_new(self, event):
    def validate_new(self, event, config):
        """Validates the event has roughly the right format

        Args:
            event (FrozenEvent)
            event (FrozenEvent): The event to validate.
            config (Config): The homeserver's configuration.
        """
        self.validate_builder(event)

@@ -67,6 +68,99 @@ class EventValidator(object):
                    Codes.INVALID_PARAM,
                )

        if event.type == EventTypes.Retention:
            self._validate_retention(event, config)

    def _validate_retention(self, event, config):
        """Checks that an event that defines the retention policy for a room respects the
        boundaries imposed by the server's administrator.

        Args:
            event (FrozenEvent): The event to validate.
            config (Config): The homeserver's configuration.
        """
        min_lifetime = event.content.get("min_lifetime")
        max_lifetime = event.content.get("max_lifetime")

        if min_lifetime is not None:
            if not isinstance(min_lifetime, integer_types):
                raise SynapseError(
                    code=400,
                    msg="'min_lifetime' must be an integer",
                    errcode=Codes.BAD_JSON,
                )

            if (
                config.retention_allowed_lifetime_min is not None
                and min_lifetime < config.retention_allowed_lifetime_min
            ):
                raise SynapseError(
                    code=400,
                    msg=(
                        "'min_lifetime' can't be lower than the minimum allowed"
                        " value enforced by the server's administrator"
                    ),
                    errcode=Codes.BAD_JSON,
                )

            if (
                config.retention_allowed_lifetime_max is not None
                and min_lifetime > config.retention_allowed_lifetime_max
            ):
                raise SynapseError(
                    code=400,
                    msg=(
                        "'min_lifetime' can't be greater than the maximum allowed"
                        " value enforced by the server's administrator"
                    ),
                    errcode=Codes.BAD_JSON,
                )

        if max_lifetime is not None:
            if not isinstance(max_lifetime, integer_types):
                raise SynapseError(
                    code=400,
                    msg="'max_lifetime' must be an integer",
                    errcode=Codes.BAD_JSON,
                )

            if (
                config.retention_allowed_lifetime_min is not None
                and max_lifetime < config.retention_allowed_lifetime_min
            ):
                raise SynapseError(
                    code=400,
                    msg=(
                        "'max_lifetime' can't be lower than the minimum allowed value"
                        " enforced by the server's administrator"
                    ),
                    errcode=Codes.BAD_JSON,
                )

            if (
                config.retention_allowed_lifetime_max is not None
                and max_lifetime > config.retention_allowed_lifetime_max
            ):
                raise SynapseError(
                    code=400,
                    msg=(
                        "'max_lifetime' can't be greater than the maximum allowed"
                        " value enforced by the server's administrator"
                    ),
                    errcode=Codes.BAD_JSON,
                )

        if (
            min_lifetime is not None
            and max_lifetime is not None
            and min_lifetime > max_lifetime
        ):
            raise SynapseError(
                code=400,
                msg="'min_lifetime' can't be greater than 'max_lifetime'",
                errcode=Codes.BAD_JSON,
            )
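As a hedged illustration of the final check above (lifetimes are integers in milliseconds; the per-server caps come from the retention config discussed earlier):

```python
# Hypothetical m.room.retention content rejected by the min > max check:
content = {
    "min_lifetime": 7 * 86400000,  # 7d
    "max_lifetime": 1 * 86400000,  # 1d -> min_lifetime > max_lifetime
}
```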

    def validate_builder(self, event):
        """Validates that the builder/event has roughly the right format. Only
        checks values that we expect a proto event to have, rather than all the

@@ -102,7 +102,7 @@ class FederationBase(object):
                pass

            if not res:
                logger.warn(
                logger.warning(
                    "Failed to find copy of %s with valid signature", pdu.event_id
                )

@@ -173,7 +173,7 @@ class FederationBase(object):
            return redacted_event

        if self.spam_checker.check_event_for_spam(pdu):
            logger.warn(
            logger.warning(
                "Event contains spam, redacting %s: %s",
                pdu.event_id,
                pdu.get_pdu_json(),
@@ -185,7 +185,7 @@ class FederationBase(object):
        def errback(failure, pdu):
            failure.trap(SynapseError)
            with PreserveLoggingContext(ctx):
                logger.warn(
                logger.warning(
                    "Signature check failed for %s: %s",
                    pdu.event_id,
                    failure.getErrorMessage(),

@@ -18,8 +18,6 @@ import copy
import itertools
import logging

from six.moves import range

from prometheus_client import Counter

from twisted.internet import defer
@@ -39,7 +37,7 @@ from synapse.api.room_versions import (
)
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.utils import log_function
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
@@ -177,7 +175,7 @@ class FederationClient(FederationBase):
        given destination server.

        Args:
            dest (str): The remote home server to ask.
            dest (str): The remote homeserver to ask.
            room_id (str): The room_id to backfill.
            limit (int): The maximum number of PDUs to return.
            extremities (list): List of PDU id and origins of the first pdus
@@ -196,7 +194,7 @@ class FederationClient(FederationBase):
            dest, room_id, extremities, limit
        )

        logger.debug("backfill transaction_data=%s", repr(transaction_data))
        logger.debug("backfill transaction_data=%r", transaction_data)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
@@ -227,7 +225,7 @@ class FederationClient(FederationBase):
        one succeeds.

        Args:
            destinations (list): Which home servers to query
            destinations (list): Which homeservers to query
            event_id (str): event to fetch
            room_version (str): version of the room
            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
@@ -310,162 +308,26 @@ class FederationClient(FederationBase):
        return signed_pdu

    @defer.inlineCallbacks
    @log_function
    def get_state_for_room(self, destination, room_id, event_id):
        """Requests all of the room state at a given event from a remote home server.

        Args:
            destination (str): The remote homeserver to query for the state.
            room_id (str): The id of the room we're interested in.
            event_id (str): The id of the event we want the state at.
    def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
        """Calls the /state_ids endpoint to fetch the state at a particular point
        in the room, and the auth events for the given event

        Returns:
            Deferred[Tuple[List[EventBase], List[EventBase]]]:
                A list of events in the state, and a list of events in the auth chain
                for the given event.
            Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
        """
        try:
            # First we try and ask for just the IDs, as that's far quicker if
            # we have most of the state and auth_chain already.
            # However, this may 404 if the other side has an old synapse.
            result = yield self.transport_layer.get_room_state_ids(
                destination, room_id, event_id=event_id
            )

            state_event_ids = result["pdu_ids"]
            auth_event_ids = result.get("auth_chain_ids", [])

            fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
                destination, room_id, set(state_event_ids + auth_event_ids)
            )

            if failed_to_fetch:
                logger.warning(
                    "Failed to fetch missing state/auth events for %s: %s",
                    room_id,
                    failed_to_fetch,
                )

            event_map = {ev.event_id: ev for ev in fetched_events}

            pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
            auth_chain = [
                event_map[e_id] for e_id in auth_event_ids if e_id in event_map
            ]

            auth_chain.sort(key=lambda e: e.depth)

            return pdus, auth_chain
        except HttpResponseException as e:
            if e.code == 400 or e.code == 404:
                logger.info("Failed to use get_room_state_ids API, falling back")
            else:
                raise e

        result = yield self.transport_layer.get_room_state(
        result = yield self.transport_layer.get_room_state_ids(
            destination, room_id, event_id=event_id
        )

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        state_event_ids = result["pdu_ids"]
        auth_event_ids = result.get("auth_chain_ids", [])

        pdus = [
            event_from_pdu_json(p, format_ver, outlier=True) for p in result["pdus"]
        ]
        if not isinstance(state_event_ids, list) or not isinstance(
            auth_event_ids, list
        ):
            raise Exception("invalid response from /state_ids")

        auth_chain = [
            event_from_pdu_json(p, format_ver, outlier=True)
            for p in result.get("auth_chain", [])
        ]

        seen_events = yield self.store.get_events(
            [ev.event_id for ev in itertools.chain(pdus, auth_chain)]
        )

        signed_pdus = yield self._check_sigs_and_hash_and_fetch(
            destination,
            [p for p in pdus if p.event_id not in seen_events],
            outlier=True,
            room_version=room_version,
        )
        signed_pdus.extend(
            seen_events[p.event_id] for p in pdus if p.event_id in seen_events
        )

        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            destination,
            [p for p in auth_chain if p.event_id not in seen_events],
            outlier=True,
            room_version=room_version,
        )
        signed_auth.extend(
            seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
        )

        signed_auth.sort(key=lambda e: e.depth)

        return signed_pdus, signed_auth

    @defer.inlineCallbacks
    def get_events_from_store_or_dest(self, destination, room_id, event_ids):
        """Fetch events from a remote destination, checking if we already have them.

        Args:
            destination (str)
            room_id (str)
            event_ids (list)

        Returns:
            Deferred: A deferred resolving to a 2-tuple where the first is a list of
            events and the second is a list of event ids that we failed to fetch.
        """
        seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
        signed_events = list(seen_events.values())

        failed_to_fetch = set()

        missing_events = set(event_ids)
        for k in seen_events:
            missing_events.discard(k)

        if not missing_events:
            return signed_events, failed_to_fetch

        logger.debug(
            "Fetching unknown state/auth events %s for room %s",
            missing_events,
            event_ids,
        )

        room_version = yield self.store.get_room_version(room_id)

        batch_size = 20
        missing_events = list(missing_events)
        for i in range(0, len(missing_events), batch_size):
            batch = set(missing_events[i : i + batch_size])

            deferreds = [
                run_in_background(
                    self.get_pdu,
                    destinations=[destination],
                    event_id=e_id,
                    room_version=room_version,
                )
                for e_id in batch
            ]

            res = yield make_deferred_yieldable(
                defer.DeferredList(deferreds, consumeErrors=True)
            )
            for success, result in res:
                if success and result:
                    signed_events.append(result)
                    batch.discard(result.event_id)

            # We removed all events we successfully fetched from `batch`
            failed_to_fetch.update(batch)

        return signed_events, failed_to_fetch
        return state_event_ids, auth_event_ids
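A hedged usage sketch of the slimmed-down method (the destination is a placeholder): callers now get back two lists of event ID strings and must fetch and verify the events themselves.

```python
# Inside a @defer.inlineCallbacks method on FederationClient (sketch):
state_ids, auth_chain_ids = yield self.get_room_state_ids(
    "remote.example.org", room_id, event_id=event_id
)
# Both values are plain lists of event IDs; no EventBase objects are built.
```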

    @defer.inlineCallbacks
    @log_function
@@ -522,12 +384,12 @@ class FederationClient(FederationBase):
            res = yield callback(destination)
            return res
        except InvalidResponseError as e:
            logger.warn("Failed to %s via %s: %s", description, destination, e)
            logger.warning("Failed to %s via %s: %s", description, destination, e)
        except HttpResponseException as e:
            if not 500 <= e.code < 600:
                raise e.to_synapse_error()
            else:
                logger.warn(
                logger.warning(
                    "Failed to %s via %s: %i %s",
                    description,
                    destination,
@@ -535,7 +397,9 @@ class FederationClient(FederationBase):
                    e.args[0],
                )
        except Exception:
            logger.warn("Failed to %s via %s", description, destination, exc_info=1)
            logger.warning(
                "Failed to %s via %s", description, destination, exc_info=1
            )

        raise SynapseError(502, "Failed to %s via any server" % (description,))

@@ -553,7 +417,7 @@ class FederationClient(FederationBase):
        Note that this does not append any events to any graphs.

        Args:
            destinations (str): Candidate homeservers which are probably
            destinations (Iterable[str]): Candidate homeservers which are probably
                participating in the room.
            room_id (str): The room in which the event will happen.
            user_id (str): The user whose membership is being evented.
@@ -662,13 +526,7 @@ class FederationClient(FederationBase):

        @defer.inlineCallbacks
        def send_request(destination):
            time_now = self._clock.time_msec()
            _, content = yield self.transport_layer.send_join(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )
            content = yield self._do_send_join(destination, pdu)

            logger.debug("Got content: %s", content)

@@ -735,6 +593,44 @@ class FederationClient(FederationBase):

        return self._try_destination_list("send_join", destinations, send_request)

    @defer.inlineCallbacks
    def _do_send_join(self, destination, pdu):
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_join_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

            return content
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, or an
                # unrecognised endpoint error, we assume that the remote understands
                # the v2 invite API and this is a legitimate error.
                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                    raise err
            else:
                raise e.to_synapse_error()

        logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

        resp = yield self.transport_layer.send_join_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )

        # We expect the v1 API to respond with [200, content], so we only return the
        # content.
        return resp[1]
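The try-v2-then-fall-back-to-v1 shape above generalises; a hedged sketch (the helper and its two callables are hypothetical, while `HttpResponseException` and `Codes` are the same types used above):

```python
@defer.inlineCallbacks
def _call_with_v1_fallback(v2_call, v1_call):
    try:
        content = yield v2_call()
        return content
    except HttpResponseException as e:
        err = e.to_synapse_error()
        # Fall back only when the remote clearly doesn't implement v2: a
        # generic or unrecognised-endpoint error with a 400/404 status.
        if e.code not in (400, 404) or err.errcode not in (
            Codes.UNKNOWN,
            Codes.UNRECOGNIZED,
        ):
            raise err
    resp = yield v1_call()
    return resp[1]  # the v1 API responds with [200, content]
```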

    @defer.inlineCallbacks
    def send_invite(self, destination, room_id, event_id, pdu):
        room_version = yield self.store.get_room_version(room_id)
@@ -844,18 +740,50 @@ class FederationClient(FederationBase):

        @defer.inlineCallbacks
        def send_request(destination):
            time_now = self._clock.time_msec()
            _, content = yield self.transport_layer.send_leave(
            content = yield self._do_send_leave(destination, pdu)

            logger.debug("Got content: %s", content)
            return None

        return self._try_destination_list("send_leave", destinations, send_request)

    @defer.inlineCallbacks
    def _do_send_leave(self, destination, pdu):
        time_now = self._clock.time_msec()

        try:
            content = yield self.transport_layer.send_leave_v2(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

            logger.debug("Got content: %s", content)
            return None
            return content
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

        return self._try_destination_list("send_leave", destinations, send_request)
                # If we receive an error response that isn't a generic error, or an
                # unrecognised endpoint error, we assume that the remote understands
                # the v2 invite API and this is a legitimate error.
                if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]:
                    raise err
            else:
                raise e.to_synapse_error()

        logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")

        resp = yield self.transport_layer.send_leave_v1(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )

        # We expect the v1 API to respond with [200, content], so we only return the
        # content.
        return resp[1]

    def get_public_rooms(
        self,

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,7 +22,6 @@ from six import iteritems
from canonicaljson import json
from prometheus_client import Counter

from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

@@ -74,6 +74,7 @@ class FederationServer(FederationBase):

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler
        self.state = hs.get_state_handler()

        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")
@@ -86,14 +87,12 @@ class FederationServer(FederationBase):
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        with (yield self._server_linearizer.queue((origin, room_id))):
    async def on_backfill_request(self, origin, room_id, versions, limit):
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)
            await self.check_server_matches_acl(origin_host, room_id)

            pdus = yield self.handler.on_backfill_request(
            pdus = await self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )

@@ -101,9 +100,7 @@ class FederationServer(FederationBase):

        return 200, res

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, origin, transaction_data):
    async def on_incoming_transaction(self, origin, transaction_data):
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()
@@ -118,18 +115,17 @@ class FederationServer(FederationBase):
        # use a linearizer to ensure that we don't process the same transaction
        # multiple times in parallel.
        with (
            yield self._transaction_linearizer.queue(
            await self._transaction_linearizer.queue(
                (origin, transaction.transaction_id)
            )
        ):
            result = yield self._handle_incoming_transaction(
            result = await self._handle_incoming_transaction(
                origin, transaction, request_time
            )

        return result

    @defer.inlineCallbacks
    def _handle_incoming_transaction(self, origin, transaction, request_time):
    async def _handle_incoming_transaction(self, origin, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
@@ -140,7 +136,7 @@ class FederationServer(FederationBase):
        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(origin, transaction)
        response = await self.transaction_actions.have_responded(origin, transaction)

        if response:
            logger.debug(
@@ -151,7 +147,7 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # Reject if PDU count > 50 and EDU count > 100
        # Reject if PDU count > 50 or EDU count > 100
        if len(transaction.pdus) > 50 or (
            hasattr(transaction, "edus") and len(transaction.edus) > 100
        ):
@@ -159,7 +155,7 @@ class FederationServer(FederationBase):
            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            yield self.transaction_actions.set_response(
            await self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return 400, response
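The corrected comment now matches the code: a transaction is rejected when it carries more than 50 PDUs *or* more than 100 EDUs. Restated as a hedged standalone predicate:

```python
# Sketch; assumes a transaction object shaped like the one handled above.
def transaction_too_large(transaction) -> bool:
    return len(transaction.pdus) > 50 or (
        hasattr(transaction, "edus") and len(transaction.edus) > 100
    )
```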
|
||||
@@ -195,7 +191,7 @@ class FederationServer(FederationBase):
|
||||
continue
|
||||
|
||||
try:
|
||||
room_version = yield self.store.get_room_version(room_id)
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
except NotFoundError:
|
||||
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
|
||||
continue
|
||||
@@ -221,13 +217,12 @@ class FederationServer(FederationBase):
|
||||
# require callouts to other servers to fetch missing events), but
|
||||
# impose a limit to avoid going too crazy with ram/cpu.
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_pdus_for_room(room_id):
|
||||
async def process_pdus_for_room(room_id):
|
||||
logger.debug("Processing PDUs for %s", room_id)
|
||||
try:
|
||||
yield self.check_server_matches_acl(origin_host, room_id)
|
||||
await self.check_server_matches_acl(origin_host, room_id)
|
||||
except AuthError as e:
|
||||
logger.warn("Ignoring PDUs for room %s from banned server", room_id)
|
||||
logger.warning("Ignoring PDUs for room %s from banned server", room_id)
|
||||
for pdu in pdus_by_room[room_id]:
|
||||
event_id = pdu.event_id
|
||||
pdu_results[event_id] = e.error_dict()
|
||||
@@ -237,10 +232,10 @@ class FederationServer(FederationBase):
|
||||
event_id = pdu.event_id
|
||||
with nested_logging_context(event_id):
|
||||
try:
|
||||
yield self._handle_received_pdu(origin, pdu)
|
||||
await self._handle_received_pdu(origin, pdu)
|
||||
pdu_results[event_id] = {}
|
||||
except FederationError as e:
|
||||
logger.warn("Error handling PDU %s: %s", event_id, e)
|
||||
logger.warning("Error handling PDU %s: %s", event_id, e)
|
||||
pdu_results[event_id] = {"error": str(e)}
|
||||
except Exception as e:
|
||||
f = failure.Failure()
|
||||
@@ -251,36 +246,30 @@ class FederationServer(FederationBase):
|
||||
exc_info=(f.type, f.value, f.getTracebackObject()),
|
||||
)
|
||||
|
||||
yield concurrently_execute(
|
||||
await concurrently_execute(
|
||||
process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
|
||||
)
|
||||
|
||||
if hasattr(transaction, "edus"):
|
||||
for edu in (Edu(**x) for x in transaction.edus):
|
||||
yield self.received_edu(origin, edu.edu_type, edu.content)
|
||||
await self.received_edu(origin, edu.edu_type, edu.content)
|
||||
|
||||
response = {"pdus": pdu_results}
|
||||
|
||||
logger.debug("Returning: %s", str(response))
|
||||
|
||||
yield self.transaction_actions.set_response(origin, transaction, 200, response)
|
||||
await self.transaction_actions.set_response(origin, transaction, 200, response)
|
||||
return 200, response
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def received_edu(self, origin, edu_type, content):
|
||||
async def received_edu(self, origin, edu_type, content):
|
||||
received_edus_counter.inc()
|
||||
yield self.registry.on_edu(edu_type, origin, content)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_context_state_request(self, origin, room_id, event_id):
|
||||
if not event_id:
|
||||
raise NotImplementedError("Specify an event")
|
||||
await self.registry.on_edu(edu_type, origin, content)
|
||||
|
||||
async def on_context_state_request(self, origin, room_id, event_id):
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
yield self.check_server_matches_acl(origin_host, room_id)
|
||||
await self.check_server_matches_acl(origin_host, room_id)
|
||||
|
||||
in_room = yield self.auth.check_host_in_room(room_id, origin)
|
||||
in_room = await self.auth.check_host_in_room(room_id, origin)
|
||||
if not in_room:
|
||||
raise AuthError(403, "Host not in room.")
|
||||
|
||||
@@ -289,75 +278,79 @@ class FederationServer(FederationBase):
|
||||
# in the cache so we could return it without waiting for the linearizer
|
||||
# - but that's non-trivial to get right, and anyway somewhat defeats
|
||||
# the point of the linearizer.
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
resp = yield self._state_resp_cache.wrap(
|
||||
(room_id, event_id),
|
||||
self._on_context_state_request_compute,
|
||||
room_id,
|
||||
event_id,
|
||||
with (await self._server_linearizer.queue((origin, room_id))):
|
||||
resp = dict(
|
||||
await self._state_resp_cache.wrap(
|
||||
(room_id, event_id),
|
||||
self._on_context_state_request_compute,
|
||||
room_id,
|
||||
event_id,
|
||||
)
|
||||
)
|
||||
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
resp["room_version"] = room_version
|
||||
|
||||
return 200, resp
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_state_ids_request(self, origin, room_id, event_id):
|
||||
async def on_state_ids_request(self, origin, room_id, event_id):
|
||||
if not event_id:
|
||||
raise NotImplementedError("Specify an event")
|
||||
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
yield self.check_server_matches_acl(origin_host, room_id)
|
||||
await self.check_server_matches_acl(origin_host, room_id)
|
||||
|
||||
in_room = yield self.auth.check_host_in_room(room_id, origin)
|
||||
in_room = await self.auth.check_host_in_room(room_id, origin)
|
||||
if not in_room:
|
||||
raise AuthError(403, "Host not in room.")
|
||||
|
||||
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
|
||||
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
|
||||
state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
|
||||
auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
|
||||
|
||||
return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _on_context_state_request_compute(self, room_id, event_id):
|
||||
pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
|
||||
auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])
|
||||
async def _on_context_state_request_compute(self, room_id, event_id):
|
||||
if event_id:
|
||||
pdus = await self.handler.get_state_for_pdu(room_id, event_id)
|
||||
else:
|
||||
pdus = (await self.state.get_current_state(room_id)).values()
|
||||
|
||||
auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
|
||||
|
||||
return {
|
||||
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
||||
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
||||
}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_pdu_request(self, origin, event_id):
|
||||
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
|
||||
async def on_pdu_request(self, origin, event_id):
|
||||
pdu = await self.handler.get_persisted_pdu(origin, event_id)
|
||||
|
||||
if pdu:
|
||||
return 200, self._transaction_from_pdus([pdu]).get_dict()
|
||||
else:
|
||||
return 404, ""
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_query_request(self, query_type, args):
|
||||
async def on_query_request(self, query_type, args):
|
||||
received_queries_counter.labels(query_type).inc()
|
||||
resp = yield self.registry.on_query(query_type, args)
|
||||
resp = await self.registry.on_query(query_type, args)
|
||||
return 200, resp
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
|
||||
async def on_make_join_request(self, origin, room_id, user_id, supported_versions):
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
yield self.check_server_matches_acl(origin_host, room_id)
|
||||
await self.check_server_matches_acl(origin_host, room_id)
|
||||
|
||||
room_version = yield self.store.get_room_version(room_id)
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
if room_version not in supported_versions:
|
||||
logger.warn("Room version %s not in %s", room_version, supported_versions)
|
||||
logger.warning(
|
||||
"Room version %s not in %s", room_version, supported_versions
|
||||
)
|
||||
raise IncompatibleRoomVersionError(room_version=room_version)
|
||||
|
||||
pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
|
||||
pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
|
||||
time_now = self._clock.time_msec()
|
||||
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_invite_request(self, origin, content, room_version):
|
||||
async def on_invite_request(self, origin, content, room_version):
|
||||
if room_version not in KNOWN_ROOM_VERSIONS:
|
||||
raise SynapseError(
|
||||
400,
|
||||
@@ -369,81 +362,71 @@ class FederationServer(FederationBase):

         pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
-        pdu = yield self._check_sigs_and_hash(room_version, pdu)
-        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)
+        ret_pdu = await self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
         return {"event": ret_pdu.get_pdu_json(time_now)}

-    @defer.inlineCallbacks
-    def on_send_join_request(self, origin, content, room_id):
+    async def on_send_join_request(self, origin, content, room_id):
         logger.debug("on_send_join_request: content: %s", content)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
         pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)

         logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

-        pdu = yield self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)

-        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
+        res_pdus = await self.handler.on_send_join_request(origin, pdu)
         time_now = self._clock.time_msec()
-        return (
-            200,
-            {
-                "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
-                "auth_chain": [
-                    p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
-                ],
-            },
-        )
+        return {
+            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
+            "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
+        }

-    @defer.inlineCallbacks
-    def on_make_leave_request(self, origin, room_id, user_id):
+    async def on_make_leave_request(self, origin, room_id, user_id):
         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, room_id)
-        pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
+        await self.check_server_matches_acl(origin_host, room_id)
+        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)

         time_now = self._clock.time_msec()
         return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

-    @defer.inlineCallbacks
-    def on_send_leave_request(self, origin, content, room_id):
+    async def on_send_leave_request(self, origin, content, room_id):
         logger.debug("on_send_leave_request: content: %s", content)

-        room_version = yield self.store.get_room_version(room_id)
+        room_version = await self.store.get_room_version(room_id)
         format_ver = room_version_to_event_format(room_version)
         pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
-        yield self.check_server_matches_acl(origin_host, pdu.room_id)
+        await self.check_server_matches_acl(origin_host, pdu.room_id)

         logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

-        pdu = yield self._check_sigs_and_hash(room_version, pdu)
+        pdu = await self._check_sigs_and_hash(room_version, pdu)

-        yield self.handler.on_send_leave_request(origin, pdu)
-        return 200, {}
+        await self.handler.on_send_leave_request(origin, pdu)
+        return {}

-    @defer.inlineCallbacks
-    def on_event_auth(self, origin, room_id, event_id):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+    async def on_event_auth(self, origin, room_id, event_id):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

             time_now = self._clock.time_msec()
-            auth_pdus = yield self.handler.on_event_auth(event_id)
+            auth_pdus = await self.handler.on_event_auth(event_id)
             res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
         return 200, res

-    @defer.inlineCallbacks
-    def on_query_auth_request(self, origin, content, room_id, event_id):
+    async def on_query_auth_request(self, origin, content, room_id, event_id):
         """
         Content is a dict with keys::
             auth_chain (list): A list of events that give the auth chain.
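Every hunk above applies the same mechanical recipe for porting Twisted code to native coroutines: drop the @defer.inlineCallbacks decorator, change def to async def, and turn each `yield deferred` into `await deferred`. A minimal before/after sketch of the pattern (the `fetch` helper is hypothetical, not from this diff):

    from twisted.internet import defer

    # Before: a generator-based coroutine driven by inlineCallbacks.
    @defer.inlineCallbacks
    def get_sum_old(fetch):
        a = yield fetch(1)  # suspends until the Deferred fires
        b = yield fetch(2)
        return a + b

    # After: a native coroutine. Deferreds implement __await__, so the
    # body changes only yield -> await.
    async def get_sum_new(fetch):
        a = await fetch(1)
        b = await fetch(2)
        return a + b

    # Callers that still need a Deferred wrap the coroutine:
    #     d = defer.ensureDeferred(get_sum_new(fetch))

Note also that on_send_join_request and on_send_leave_request stop returning a (200, body) tuple here, presumably because the servlet layer supplies the HTTP status, while on_event_auth still returns `200, res` unchanged.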
@@ -462,22 +445,22 @@ class FederationServer(FederationBase):
         Returns:
             Deferred: Results in `dict` with the same format as `content`
         """
-        with (yield self._server_linearizer.queue((origin, room_id))):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

-            room_version = yield self.store.get_room_version(room_id)
+            room_version = await self.store.get_room_version(room_id)
             format_ver = room_version_to_event_format(room_version)

             auth_chain = [
                 event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
             ]

-            signed_auth = yield self._check_sigs_and_hash_and_fetch(
+            signed_auth = await self._check_sigs_and_hash_and_fetch(
                 origin, auth_chain, outlier=True, room_version=room_version
             )

-            ret = yield self.handler.on_query_auth(
+            ret = await self.handler.on_query_auth(
                 origin,
                 event_id,
                 room_id,
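The `with (await self._server_linearizer.queue((origin, room_id)))` idiom awaits a context manager and then holds it for the block, so requests for the same (origin, room_id) pair are processed one at a time. A rough asyncio analogue of what such a per-key linearizer does (my own sketch, not Synapse's Linearizer implementation):

    import asyncio
    from collections import defaultdict
    from contextlib import asynccontextmanager

    class KeyedLinearizer:
        """Serialise work per key: entries for the same key run one at a time."""

        def __init__(self):
            self._locks = defaultdict(asyncio.Lock)

        @asynccontextmanager
        async def queue(self, key):
            async with self._locks[key]:  # waiters for the same key line up here
                yield

    linearizer = KeyedLinearizer()

    async def handle(origin, room_id):
        async with linearizer.queue((origin, room_id)):
            ...  # serialised per (origin, room_id); other keys run concurrently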
@@ -503,16 +486,14 @@ class FederationServer(FederationBase):
         return self.on_query_request("user_devices", user_id)

     @trace
-    @defer.inlineCallbacks
     @log_function
-    def on_claim_client_keys(self, origin, content):
+    async def on_claim_client_keys(self, origin, content):
         query = []
         for user_id, device_keys in content.get("one_time_keys", {}).items():
             for device_id, algorithm in device_keys.items():
                 query.append((user_id, device_id, algorithm))

         log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
-        results = yield self.store.claim_e2e_one_time_keys(query)
+        results = await self.store.claim_e2e_one_time_keys(query)

         json_result = {}
         for user_id, device_keys in results.items():
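The query-building loop in on_claim_client_keys flattens the request's nested one_time_keys mapping (user_id -> device_id -> algorithm) into (user_id, device_id, algorithm) tuples for the store. With made-up IDs, the same flattening as a comprehension:

    content = {
        "one_time_keys": {
            "@alice:remote.example": {"DEV1": "signed_curve25519"},
            "@bob:remote.example": {"DEV2": "signed_curve25519"},
        }
    }

    query = [
        (user_id, device_id, algorithm)
        for user_id, device_keys in content.get("one_time_keys", {}).items()
        for device_id, algorithm in device_keys.items()
    ]
    # [("@alice:remote.example", "DEV1", "signed_curve25519"),
    #  ("@bob:remote.example", "DEV2", "signed_curve25519")]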
@@ -536,14 +517,12 @@ class FederationServer(FederationBase):

         return {"one_time_keys": json_result}

-    @defer.inlineCallbacks
     @log_function
-    def on_get_missing_events(
+    async def on_get_missing_events(
         self, origin, room_id, earliest_events, latest_events, limit
     ):
-        with (yield self._server_linearizer.queue((origin, room_id))):
+        with (await self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
-            yield self.check_server_matches_acl(origin_host, room_id)
+            await self.check_server_matches_acl(origin_host, room_id)

             logger.info(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
@@ -553,7 +532,7 @@ class FederationServer(FederationBase):
                 limit,
             )

-            missing_events = yield self.handler.on_get_missing_events(
+            missing_events = await self.handler.on_get_missing_events(
                 origin, room_id, earliest_events, latest_events, limit
             )

@@ -586,8 +565,7 @@ class FederationServer(FederationBase):
             destination=None,
         )

-    @defer.inlineCallbacks
-    def _handle_received_pdu(self, origin, pdu):
+    async def _handle_received_pdu(self, origin, pdu):
         """ Process a PDU received in a federation /send/ transaction.

         If the event is invalid, then this method throws a FederationError.
@@ -640,37 +618,34 @@ class FederationServer(FederationBase):
         logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

         # We've already checked that we know the room version by this point
-        room_version = yield self.store.get_room_version(pdu.room_id)
+        room_version = await self.store.get_room_version(pdu.room_id)

         # Check signature.
         try:
-            pdu = yield self._check_sigs_and_hash(room_version, pdu)
+            pdu = await self._check_sigs_and_hash(room_version, pdu)
         except SynapseError as e:
             raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

-        yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
+        await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)

     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name

-    @defer.inlineCallbacks
-    def exchange_third_party_invite(
+    async def exchange_third_party_invite(
         self, sender_user_id, target_user_id, room_id, signed
     ):
-        ret = yield self.handler.exchange_third_party_invite(
+        ret = await self.handler.exchange_third_party_invite(
             sender_user_id, target_user_id, room_id, signed
         )
         return ret

-    @defer.inlineCallbacks
-    def on_exchange_third_party_invite_request(self, room_id, event_dict):
-        ret = yield self.handler.on_exchange_third_party_invite_request(
+    async def on_exchange_third_party_invite_request(self, room_id, event_dict):
+        ret = await self.handler.on_exchange_third_party_invite_request(
             room_id, event_dict
         )
         return ret

-    @defer.inlineCallbacks
-    def check_server_matches_acl(self, server_name, room_id):
+    async def check_server_matches_acl(self, server_name, room_id):
         """Check if the given server is allowed by the server ACLs in the room

         Args:
@@ -680,13 +655,13 @@ class FederationServer(FederationBase):
         Raises:
             AuthError if the server does not match the ACL
         """
-        state_ids = yield self.store.get_current_state_ids(room_id)
+        state_ids = await self.store.get_current_state_ids(room_id)
         acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

         if not acl_event_id:
            return

-        acl_event = yield self.store.get_event(acl_event_id)
+        acl_event = await self.store.get_event(acl_event_id)
         if server_matches_acl_event(server_name, acl_event):
             return

@@ -709,7 +684,7 @@ def server_matches_acl_event(server_name, acl_event):
     # server name is a literal IP
     allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
     if not isinstance(allow_ip_literals, bool):
-        logger.warn("Ignorning non-bool allow_ip_literals flag")
+        logger.warning("Ignorning non-bool allow_ip_literals flag")
         allow_ip_literals = True
     if not allow_ip_literals:
         # check for ipv6 literals. These start with '['.
@@ -723,7 +698,7 @@ def server_matches_acl_event(server_name, acl_event):
     # next, check the deny list
     deny = acl_event.content.get("deny", [])
     if not isinstance(deny, (list, tuple)):
-        logger.warn("Ignorning non-list deny ACL %s", deny)
+        logger.warning("Ignorning non-list deny ACL %s", deny)
         deny = []
     for e in deny:
         if _acl_entry_matches(server_name, e):
@@ -733,7 +708,7 @@ def server_matches_acl_event(server_name, acl_event):
     # then the allow list.
     allow = acl_event.content.get("allow", [])
     if not isinstance(allow, (list, tuple)):
-        logger.warn("Ignorning non-list allow ACL %s", allow)
+        logger.warning("Ignorning non-list allow ACL %s", allow)
         allow = []
     for e in allow:
         if _acl_entry_matches(server_name, e):
@@ -747,7 +722,7 @@ def server_matches_acl_event(server_name, acl_event):

 def _acl_entry_matches(server_name, acl_entry):
     if not isinstance(acl_entry, six.string_types):
-        logger.warn(
+        logger.warning(
             "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
         )
         return False
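Aside from the logger.warn -> logger.warning deprecation fixes, these hunks show the ACL evaluation order: optionally reject IP-literal server names, then check deny before allow, each entry being a glob matched against the server name, with a default of deny when nothing in the allow list matches. A simplified, self-contained rendering of that order (my sketch; Synapse compiles its own glob-to-regex rather than using fnmatch, and the IP-literal handling is omitted):

    from fnmatch import fnmatchcase

    def matches_acl(server_name: str, acl: dict) -> bool:
        # deny is checked first: any hit rejects the server outright
        for pattern in acl.get("deny", []):
            if isinstance(pattern, str) and fnmatchcase(server_name, pattern):
                return False
        # then allow: the server must match at least one entry
        for pattern in acl.get("allow", []):
            if isinstance(pattern, str) and fnmatchcase(server_name, pattern):
                return True
        return False  # no allow match means denied

    acl = {"deny": ["*.evil.example"], "allow": ["*"]}
    assert matches_acl("matrix.example.org", acl)
    assert not matches_acl("hs.evil.example", acl)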
@@ -799,15 +774,14 @@ class FederationHandlerRegistry(object):

         self.query_handlers[query_type] = handler

-    @defer.inlineCallbacks
-    def on_edu(self, edu_type, origin, content):
+    async def on_edu(self, edu_type, origin, content):
         handler = self.edu_handlers.get(edu_type)
         if not handler:
-            logger.warn("No handler registered for EDU type %s", edu_type)
+            logger.warning("No handler registered for EDU type %s", edu_type)
             return

         with start_active_span_from_edu(content, "handle_edu"):
             try:
-                yield handler(origin, content)
+                await handler(origin, content)
             except SynapseError as e:
                 logger.info("Failed to handle edu %r: %r", edu_type, e)
             except Exception:
@@ -816,7 +790,7 @@ class FederationHandlerRegistry(object):
     def on_query(self, query_type, args):
         handler = self.query_handlers.get(query_type)
         if not handler:
-            logger.warn("No handler registered for query type %s", query_type)
+            logger.warning("No handler registered for query type %s", query_type)
             raise NotFoundError("No handler for Query type '%s'" % (query_type,))

         return handler(args)
@@ -840,7 +814,7 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):

         super(ReplicationFederationHandlerRegistry, self).__init__()

-    def on_edu(self, edu_type, origin, content):
+    async def on_edu(self, edu_type, origin, content):
         """Overrides FederationHandlerRegistry
         """
         if not self.config.use_presence and edu_type == "m.presence":
@@ -848,17 +822,17 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):

         handler = self.edu_handlers.get(edu_type)
         if handler:
-            return super(ReplicationFederationHandlerRegistry, self).on_edu(
+            return await super(ReplicationFederationHandlerRegistry, self).on_edu(
                 edu_type, origin, content
             )

-        return self._send_edu(edu_type=edu_type, origin=origin, content=content)
+        return await self._send_edu(edu_type=edu_type, origin=origin, content=content)

-    def on_query(self, query_type, args):
+    async def on_query(self, query_type, args):
         """Overrides FederationHandlerRegistry
         """
         handler = self.query_handlers.get(query_type)
         if handler:
-            return handler(args)
+            return await handler(args)

-        return self._get_query_client(query_type=query_type, args=args)
+        return await self._get_query_client(query_type=query_type, args=args)

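When a base-class method becomes a coroutine, every override that forwards to it has to await the super() call, as ReplicationFederationHandlerRegistry.on_edu now does; otherwise the override would hand back an unawaited coroutine object instead of the result. A toy illustration (names invented):

    import asyncio

    class Base:
        async def on_edu(self, edu_type: str) -> str:
            return "handled %s" % edu_type

    class Replicated(Base):
        async def on_edu(self, edu_type: str) -> str:
            # Without `await`, this would return a bare coroutine object
            # rather than the base method's result.
            return await super().on_edu(edu_type)

    print(asyncio.run(Replicated().on_edu("m.presence")))  # handled m.presence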
@@ -44,7 +44,7 @@ class TransactionActions(object):
             response code and response body.
         """
         if not transaction.transaction_id:
-            raise RuntimeError("Cannot persist a transaction with no " "transaction_id")
+            raise RuntimeError("Cannot persist a transaction with no transaction_id")

         return self.store.get_received_txn_response(transaction.transaction_id, origin)

@@ -56,7 +56,7 @@ class TransactionActions(object):
             Deferred
         """
         if not transaction.transaction_id:
-            raise RuntimeError("Cannot persist a transaction with no " "transaction_id")
+            raise RuntimeError("Cannot persist a transaction with no transaction_id")

         return self.store.set_received_txn_response(
             transaction.transaction_id, origin, code, response

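The two RuntimeError changes are purely cosmetic: Python joins adjacent string literals at compile time, so the split literal (a leftover from earlier line-wrapping) already produced the single message. For example:

    a = "Cannot persist a transaction with no " "transaction_id"
    b = "Cannot persist a transaction with no transaction_id"
    assert a == b  # adjacent literals are concatenated by the parser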
@@ -36,6 +36,8 @@ from six import iteritems

 from sortedcontainers import SortedDict

+from twisted.internet import defer
+
 from synapse.metrics import LaterGauge
 from synapse.storage.presence import UserPresenceState
 from synapse.util.metrics import Measure
@@ -212,7 +214,7 @@ class FederationRemoteSendQueue(object):
             receipt (synapse.types.ReadReceipt):
         """
         # nothing to do here: the replication listener will handle it.
-        pass
+        return defer.succeed(None)

     def send_presence(self, states):
         """As per FederationSender

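Replacing the bare pass with defer.succeed(None) keeps the no-op implementation compatible with callers that now await the FederationSender interface: a fired Deferred is awaitable, while the implicit None return of pass is not. A small sketch of the difference (the caller is assumed, not from this diff):

    from twisted.internet import defer

    def send_receipt_old(receipt):
        pass  # implicitly returns None

    def send_receipt_new(receipt):
        return defer.succeed(None)  # already-fired, awaitable Deferred

    async def caller():
        await send_receipt_new("rr")    # fine: Deferreds support __await__
        # await send_receipt_old("rr")  # TypeError: None is not awaitable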
@@ -49,7 +49,7 @@ sent_pdus_destination_dist_count = Counter(

 sent_pdus_destination_dist_total = Counter(
     "synapse_federation_client_sent_pdu_destinations:total",
-    "" "Total number of PDUs queued for sending across all destinations",
+    "Total number of PDUs queued for sending across all destinations",
 )

@@ -192,15 +192,16 @@ class PerDestinationQueue(object):
             # We have to keep 2 free slots for presence and rr_edus
             limit = MAX_EDUS_PER_TRANSACTION - 2

-            device_update_edus, dev_list_id = (
-                yield self._get_device_update_edus(limit)
+            device_update_edus, dev_list_id = yield self._get_device_update_edus(
+                limit
             )

             limit -= len(device_update_edus)

-            to_device_edus, device_stream_id = (
-                yield self._get_to_device_message_edus(limit)
-            )
+            (
+                to_device_edus,
+                device_stream_id,
+            ) = yield self._get_to_device_message_edus(limit)

             pending_edus = device_update_edus + to_device_edus

@@ -359,20 +360,20 @@ class PerDestinationQueue(object):
         last_device_list = self._last_device_list_stream_id

         # Retrieve list of new device updates to send to the destination
-        now_stream_id, results = yield self._store.get_devices_by_remote(
+        now_stream_id, results = yield self._store.get_device_updates_by_remote(
             self._destination, last_device_list, limit=limit
         )
         edus = [
             Edu(
                 origin=self._server_name,
                 destination=self._destination,
-                edu_type="m.device_list_update",
+                edu_type=edu_type,
                 content=content,
             )
-            for content in results
+            for (edu_type, content) in results
         ]

-        assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
+        assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"

         return (edus, now_stream_id)
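With the rename to get_device_updates_by_remote, the storage layer hands back (edu_type, content) pairs instead of bare contents, so one query can feed more than the hard-coded m.device_list_update EDU type. A sketch of building EDUs from such pairs (the dataclass and the payloads are illustrative, not Synapse's Edu model):

    from dataclasses import dataclass

    @dataclass
    class Edu:
        origin: str
        destination: str
        edu_type: str
        content: dict

    # (edu_type, content) rows, as the renamed method now returns them
    results = [
        ("m.device_list_update", {"user_id": "@alice:local", "device_id": "D1"}),
        ("m.signing_key_update", {"user_id": "@alice:local"}),
    ]

    edus = [
        Edu(origin="local.example", destination="remote.example",
            edu_type=edu_type, content=content)
        for (edu_type, content) in results
    ]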