Compare commits
325 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| fecf3161e9 | |||
| 321c054366 | |||
| eafa6f3b0d | |||
| 75223bb1d3 | |||
| b9d57502da | |||
| a8ac40445c | |||
| 23bc5ee14e | |||
| 76b25075b0 | |||
| f69f4e7a97 | |||
| 8a2e8eaaec | |||
| 785cbd3999 | |||
| b18f54c845 | |||
| cbb926f237 | |||
| f7cb88c3bd | |||
| a6fb79cf36 | |||
| 6d2557f1e8 | |||
| cec502a595 | |||
| fe8636b79e | |||
| 059369dd04 | |||
| de7ba972be | |||
| 35e6e69ea9 | |||
| 34b826bef8 | |||
| f014ea9b5c | |||
| b0cf4228d2 | |||
| 21c037ac46 | |||
| 74fb729213 | |||
| 412c6e21a8 | |||
| c188bd2c12 | |||
| 20402aa128 | |||
| 6d86df73f1 | |||
| 6382914587 | |||
| fb5acd7039 | |||
| 8cf7fbbce0 | |||
| 7809f0c022 | |||
| d6de55bce9 | |||
| 3ad24ab386 | |||
| 1b63ccd848 | |||
| f70d0a1dd9 | |||
| 3039be82ce | |||
| 28bce1ac7c | |||
| 18bdac8ee4 | |||
| af187805b3 | |||
| 96bdd661b8 | |||
| 0b6fbb28a8 | |||
| e9906b0772 | |||
| fb3469f53a | |||
| f218705d2a | |||
| 2546f32b90 | |||
| 9d9cf3583b | |||
| 2bec3a4953 | |||
| 3de6cc245f | |||
| 156a461cbd | |||
| c9456193d3 | |||
| fb86217553 | |||
| 41546f946e | |||
| a7f0161276 | |||
| 1016f303e5 | |||
| 107ad133fc | |||
| af9f1c0764 | |||
| d1b5b055be | |||
| edeae53221 | |||
| c32d359094 | |||
| bf4db42920 | |||
| 977fa4a717 | |||
| 6881f21f3e | |||
| 8ed9e63432 | |||
| d55bc4a8bf | |||
| 5d018d23f0 | |||
| 93fd3cbc7a | |||
| 3c076c79c5 | |||
| a8f40a8302 | |||
| 55a0c98d16 | |||
| 0b36decfb6 | |||
| 312cc48e2b | |||
| d02e41dcb2 | |||
| da378af445 | |||
| d2e3d5b9db | |||
| 76a58fdcce | |||
| 58af30a6c7 | |||
| 0f632f3a57 | |||
| ad167c3849 | |||
| f25f638c35 | |||
| 3ff3dfe5a3 | |||
| f4a30d286f | |||
| bc35503528 | |||
| a4a9ded4d0 | |||
| e5a0224837 | |||
| dc4d74e44a | |||
| c5288e9984 | |||
| 2e697d3013 | |||
| 0eefb76fa1 | |||
| cf89266b98 | |||
| 02735e140f | |||
| f31d4cb7a2 | |||
| 72167fb394 | |||
| 58a755cdc3 | |||
| 8fde611a8c | |||
| 8f15832950 | |||
| 9fe6ad5fef | |||
| fe2f2fc530 | |||
| 6be336c0d8 | |||
| 3b7a35a59a | |||
| a9bcae9f50 | |||
| d4f91e7e9f | |||
| 4037d3220a | |||
| 123c04daa7 | |||
| 62a2d60d72 | |||
| 958d69f300 | |||
| 15056ca208 | |||
| f92d05e254 | |||
| 7a48d0bab8 | |||
| b4d5ff0af7 | |||
| e23ab7f41a | |||
| 1ec7d656dd | |||
| 458e51df7a | |||
| 63eb4a1b62 | |||
| 8c97f6414c | |||
| 5c3eecc70f | |||
| 4e97eb89e5 | |||
| 448bcfd0f9 | |||
| e6a6c4fbab | |||
| c9964ba600 | |||
| 865077f1d1 | |||
| aecae8f397 | |||
| 7c8c3b8437 | |||
| 3e013b7c8e | |||
| 2a12d76646 | |||
| 97a8b4caf7 | |||
| df3a5db629 | |||
| 85b0bd8fe0 | |||
| 105e7f6ed3 | |||
| 3b476f5767 | |||
| d94916852f | |||
| 84c6ea1af8 | |||
| 45df38e61b | |||
| fa87004bc1 | |||
| bd083a5fcf | |||
| 244953be3f | |||
| 08352d44f8 | |||
| d74595e2ca | |||
| 1a93daf353 | |||
| 97bf307755 | |||
| 992333b995 | |||
| 8b16696b24 | |||
| dde6ea7ff6 | |||
| 2e9cf7dda5 | |||
| 14c24c9037 | |||
| a0ee2ec458 | |||
| d1020653fc | |||
| 1f8bae7724 | |||
| 1cad8d7b6f | |||
| 0f2ecb961e | |||
| 26d742fed6 | |||
| 70e18cee00 | |||
| b1605cdd23 | |||
| 618bd1ee76 | |||
| f16aa3a44b | |||
| c0a1301ccd | |||
| baf081cd3b | |||
| 2d573e2e2b | |||
| 2276936bac | |||
| f30a71a67b | |||
| cf2972c818 | |||
| c159803067 | |||
| 0c4a99607e | |||
| 62921fb53e | |||
| 32768e96d4 | |||
| 418635e68a | |||
| adcd5368b0 | |||
| 73bbaf2bc6 | |||
| 3641784e8c | |||
| 65afc535a6 | |||
| 4806651744 | |||
| fadfde9aaa | |||
| 18a466b84e | |||
| 3db1377b26 | |||
| 841b12867e | |||
| 73bf452666 | |||
| 22d2338ace | |||
| 1883223a01 | |||
| 4f6984aa88 | |||
| cda4460d99 | |||
| 39e594b765 | |||
| cf0006719d | |||
| b2a629ef49 | |||
| d9ea9881d2 | |||
| c96322c8d2 | |||
| 0d0f6d12bc | |||
| 17c27df6ea | |||
| 80cfad233e | |||
| 720d30469f | |||
| 79f689e6c2 | |||
| c560b791e1 | |||
| 8e513e7afc | |||
| 22e862304a | |||
| 0cb72812f9 | |||
| f477ce4b1a | |||
| 66f5ff72fd | |||
| 2017369f7d | |||
| 8b0d5b171e | |||
| 5ea773c505 | |||
| 54437c48ca | |||
| f337d2f0f0 | |||
| 0fd171770a | |||
| 826e6ec3bd | |||
| f99554b15d | |||
| dc7cf81267 | |||
| f214bff0c0 | |||
| dcca56baba | |||
| c7095be913 | |||
| 7704873cb8 | |||
| d7bd9651bc | |||
| 5c07c97c09 | |||
| 7b8bc61834 | |||
| ced4fdaa84 | |||
| 2410335507 | |||
| bd2e1a2aa8 | |||
| ebc5ed1296 | |||
| 5c05ae7ba0 | |||
| b73ce4ba81 | |||
| 356ed0438e | |||
| 6a85cb5ef7 | |||
| a3e40bd5b4 | |||
| cfc00068bd | |||
| dd2851d576 | |||
| 10523241d8 | |||
| 82345bc09a | |||
| 7ad1d76356 | |||
| b2a382efdb | |||
| 89c885909a | |||
| 8e1ada9e6f | |||
| 059d8c1a4e | |||
| c618a5d348 | |||
| 6de09e07a6 | |||
| fa8271c5ac | |||
| 9c70a02a9c | |||
| 1def298119 | |||
| 2091c91fde | |||
| 375162b3c3 | |||
| 65c5592b8e | |||
| c831c5b2bb | |||
| 5ed7853bb0 | |||
| f44354e17f | |||
| d0d479c1af | |||
| 03cc8c4b5d | |||
| eca4f5ac73 | |||
| 1b2067f53d | |||
| e8c53b07f2 | |||
| c8f35d8d38 | |||
| fdefb9e29a | |||
| 37b524f971 | |||
| 823e13ddf4 | |||
| 18c516698e | |||
| d86321300a | |||
| d336b51331 | |||
| 5f158ec039 | |||
| db0a50bc40 | |||
| 24aa0e0a5b | |||
| 4c17a87606 | |||
| d445b3ae57 | |||
| 59f15309ca | |||
| f369164761 | |||
| 6bb0357c94 | |||
| a83577d64f | |||
| 39e9839a04 | |||
| 78a1cd36b5 | |||
| 0a4001eba1 | |||
| 38a6d3eea7 | |||
| 1890cfcf82 | |||
| 8ab3444fdf | |||
| 953dbb7980 | |||
| b2a2e96ea6 | |||
| 351d9bd317 | |||
| f77e997619 | |||
| f281714583 | |||
| 3dd61d12cd | |||
| 4d122d295c | |||
| 65434da75d | |||
| 7b3bc755a3 | |||
| d88421ab03 | |||
| af67c7c1de | |||
| 824707383b | |||
| 73cb716b3c | |||
| 5e01e9ac19 | |||
| f3615a8aa5 | |||
| 7556851665 | |||
| 43d175d17a | |||
| b70e080b59 | |||
| 57eacee4f4 | |||
| c142e5d16a | |||
| 4b1f7febc7 | |||
| f9e99f9534 | |||
| 1af2fcd492 | |||
| f05c7d62bc | |||
| 1a807dfe68 | |||
| 589d43d9cd | |||
| 9b1b79f3f5 | |||
| ad8b909ce9 | |||
| 80cc82a445 | |||
| b4f5416dd9 | |||
| eadb13d2e9 | |||
| 7f0d8e4288 | |||
| 9ccea16d45 | |||
| a6a776f3d8 | |||
| 9481707a52 | |||
| 0e5434264f | |||
| 1ee268d33d | |||
| ee91ac179c | |||
| 822a0f0435 | |||
| 54283f3ed4 | |||
| 20332b278d | |||
| c061d4f237 | |||
| f6608a8805 | |||
| 426854e7bc | |||
| 463b072b12 | |||
| d0b849c86d | |||
| cb8d568cf9 | |||
| 463d5a8fde | |||
| 91753cae59 | |||
| c7b48bd42d | |||
| 0ee9076ffe | |||
| 10fe904d88 | |||
| 9f3c0a8556 | |||
| 65dd5543f6 | |||
| 8ee69f299c |
+23
-9
@@ -49,14 +49,15 @@ steps:
|
|||||||
|
|
||||||
|
|
||||||
- command:
|
- command:
|
||||||
- "python -m pip install tox"
|
- "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
|
||||||
|
- "python3.5 -m pip install tox"
|
||||||
- "tox -e py35-old,codecov"
|
- "tox -e py35-old,codecov"
|
||||||
label: ":python: 3.5 / SQLite / Old Deps"
|
label: ":python: 3.5 / SQLite / Old Deps"
|
||||||
env:
|
env:
|
||||||
TRIAL_FLAGS: "-j 2"
|
TRIAL_FLAGS: "-j 2"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "python:3.5"
|
image: "ubuntu:xenial" # We use xenial to get an old sqlite and python
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
@@ -117,8 +118,10 @@ steps:
|
|||||||
limit: 2
|
limit: 2
|
||||||
|
|
||||||
- label: ":python: 3.5 / :postgres: 9.5"
|
- label: ":python: 3.5 / :postgres: 9.5"
|
||||||
|
agents:
|
||||||
|
queue: "medium"
|
||||||
env:
|
env:
|
||||||
TRIAL_FLAGS: "-j 4"
|
TRIAL_FLAGS: "-j 8"
|
||||||
command:
|
command:
|
||||||
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
|
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
|
||||||
plugins:
|
plugins:
|
||||||
@@ -134,8 +137,10 @@ steps:
|
|||||||
limit: 2
|
limit: 2
|
||||||
|
|
||||||
- label: ":python: 3.7 / :postgres: 9.5"
|
- label: ":python: 3.7 / :postgres: 9.5"
|
||||||
|
agents:
|
||||||
|
queue: "medium"
|
||||||
env:
|
env:
|
||||||
TRIAL_FLAGS: "-j 4"
|
TRIAL_FLAGS: "-j 8"
|
||||||
command:
|
command:
|
||||||
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
|
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
|
||||||
plugins:
|
plugins:
|
||||||
@@ -151,8 +156,10 @@ steps:
|
|||||||
limit: 2
|
limit: 2
|
||||||
|
|
||||||
- label: ":python: 3.7 / :postgres: 11"
|
- label: ":python: 3.7 / :postgres: 11"
|
||||||
|
agents:
|
||||||
|
queue: "medium"
|
||||||
env:
|
env:
|
||||||
TRIAL_FLAGS: "-j 4"
|
TRIAL_FLAGS: "-j 8"
|
||||||
command:
|
command:
|
||||||
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
|
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
|
||||||
plugins:
|
plugins:
|
||||||
@@ -173,11 +180,13 @@ steps:
|
|||||||
queue: "medium"
|
queue: "medium"
|
||||||
command:
|
command:
|
||||||
- "bash .buildkite/merge_base_branch.sh"
|
- "bash .buildkite/merge_base_branch.sh"
|
||||||
- "bash .buildkite/synapse_sytest.sh"
|
- "bash /synapse_sytest.sh"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "matrixdotorg/sytest-synapse:py35"
|
image: "matrixdotorg/sytest-synapse:py35"
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
|
always-pull: true
|
||||||
|
workdir: "/src"
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
- exit_status: -1
|
- exit_status: -1
|
||||||
@@ -192,11 +201,13 @@ steps:
|
|||||||
POSTGRES: "1"
|
POSTGRES: "1"
|
||||||
command:
|
command:
|
||||||
- "bash .buildkite/merge_base_branch.sh"
|
- "bash .buildkite/merge_base_branch.sh"
|
||||||
- "bash .buildkite/synapse_sytest.sh"
|
- "bash /synapse_sytest.sh"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "matrixdotorg/sytest-synapse:py35"
|
image: "matrixdotorg/sytest-synapse:py35"
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
|
always-pull: true
|
||||||
|
workdir: "/src"
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
- exit_status: -1
|
- exit_status: -1
|
||||||
@@ -210,14 +221,17 @@ steps:
|
|||||||
env:
|
env:
|
||||||
POSTGRES: "1"
|
POSTGRES: "1"
|
||||||
WORKERS: "1"
|
WORKERS: "1"
|
||||||
|
BLACKLIST: "synapse-blacklist-with-workers"
|
||||||
command:
|
command:
|
||||||
- "bash .buildkite/merge_base_branch.sh"
|
- "bash .buildkite/merge_base_branch.sh"
|
||||||
- "bash .buildkite/synapse_sytest.sh"
|
- "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
|
||||||
|
- "bash /synapse_sytest.sh"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "matrixdotorg/sytest-synapse:py35"
|
image: "matrixdotorg/sytest-synapse:py35"
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
soft_fail: true
|
always-pull: true
|
||||||
|
workdir: "/src"
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
- exit_status: -1
|
- exit_status: -1
|
||||||
|
|||||||
@@ -1,145 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Fetch sytest, and then run the tests for synapse. The entrypoint for the
|
|
||||||
# sytest-synapse docker images.
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
if [ -n "$BUILDKITE" ]
|
|
||||||
then
|
|
||||||
SYNAPSE_DIR=`pwd`
|
|
||||||
else
|
|
||||||
SYNAPSE_DIR="/src"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Attempt to find a sytest to use.
|
|
||||||
# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
|
|
||||||
if [ -d "/sytest" ]; then
|
|
||||||
# If the user has mounted in a SyTest checkout, use that.
|
|
||||||
echo "Using local sytests..."
|
|
||||||
|
|
||||||
# create ourselves a working directory and dos2unix some scripts therein
|
|
||||||
mkdir -p /work/jenkins
|
|
||||||
for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
|
|
||||||
dos2unix -n "/sytest/$i" "/work/$i"
|
|
||||||
done
|
|
||||||
ln -sf /sytest/tests /work
|
|
||||||
ln -sf /sytest/keys /work
|
|
||||||
SYTEST_LIB="/sytest/lib"
|
|
||||||
else
|
|
||||||
if [ -n "BUILDKITE_BRANCH" ]
|
|
||||||
then
|
|
||||||
branch_name=$BUILDKITE_BRANCH
|
|
||||||
else
|
|
||||||
# Otherwise, try and find out what the branch that the Synapse checkout is using. Fall back to develop if it's not a branch.
|
|
||||||
branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Try and fetch the branch
|
|
||||||
echo "Trying to get same-named sytest branch..."
|
|
||||||
wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
|
|
||||||
# Probably a 404, fall back to develop
|
|
||||||
echo "Using develop instead..."
|
|
||||||
wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
|
|
||||||
}
|
|
||||||
|
|
||||||
mkdir -p /work
|
|
||||||
tar -C /work --strip-components=1 -xf sytest.tar.gz
|
|
||||||
SYTEST_LIB="/work/lib"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd /work
|
|
||||||
|
|
||||||
# PostgreSQL setup
|
|
||||||
if [ -n "$POSTGRES" ]
|
|
||||||
then
|
|
||||||
export PGUSER=postgres
|
|
||||||
export POSTGRES_DB_1=pg1
|
|
||||||
export POSTGRES_DB_2=pg2
|
|
||||||
|
|
||||||
# Start the database
|
|
||||||
su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
|
|
||||||
|
|
||||||
# Use the Jenkins script to write out the configuration for a PostgreSQL using Synapse
|
|
||||||
jenkins/prep_sytest_for_postgres.sh
|
|
||||||
|
|
||||||
# Make the test databases for the two Synapse servers that will be spun up
|
|
||||||
su -c 'psql -c "CREATE DATABASE pg1;"' postgres
|
|
||||||
su -c 'psql -c "CREATE DATABASE pg2;"' postgres
|
|
||||||
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$OFFLINE" ]; then
|
|
||||||
# if we're in offline mode, just put synapse into the virtualenv, and
|
|
||||||
# hope that the deps are up-to-date.
|
|
||||||
#
|
|
||||||
# (`pip install -e` likes to reinstall setuptools even if it's already installed,
|
|
||||||
# so we just run setup.py explicitly.)
|
|
||||||
#
|
|
||||||
(cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
|
|
||||||
else
|
|
||||||
# We've already created the virtualenv, but lets double check we have all
|
|
||||||
# deps.
|
|
||||||
/venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
|
|
||||||
/venv/bin/pip install -q --upgrade --no-cache-dir \
|
|
||||||
lxml psycopg2 coverage codecov tap.py
|
|
||||||
|
|
||||||
# Make sure all Perl deps are installed -- this is done in the docker build
|
|
||||||
# so will only install packages added since the last Docker build
|
|
||||||
./install-deps.pl
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
# Run the tests
|
|
||||||
>&2 echo "+++ Running tests"
|
|
||||||
|
|
||||||
RUN_TESTS=(
|
|
||||||
perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
|
|
||||||
)
|
|
||||||
|
|
||||||
TEST_STATUS=0
|
|
||||||
|
|
||||||
if [ -n "$WORKERS" ]; then
|
|
||||||
RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
|
|
||||||
else
|
|
||||||
RUN_TESTS+=(-I Synapse)
|
|
||||||
fi
|
|
||||||
|
|
||||||
"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
|
|
||||||
|
|
||||||
if [ $TEST_STATUS -ne 0 ]; then
|
|
||||||
>&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
|
|
||||||
else
|
|
||||||
>&2 echo -e "run-tests \e[32mPASSED\e[0m"
|
|
||||||
fi
|
|
||||||
|
|
||||||
>&2 echo "--- Copying assets"
|
|
||||||
|
|
||||||
# Copy out the logs
|
|
||||||
mkdir -p /logs
|
|
||||||
cp results.tap /logs/results.tap
|
|
||||||
rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
|
|
||||||
|
|
||||||
# Upload coverage to codecov and upload files, if running on Buildkite
|
|
||||||
if [ -n "$BUILDKITE" ]
|
|
||||||
then
|
|
||||||
/venv/bin/coverage combine || true
|
|
||||||
/venv/bin/coverage xml || true
|
|
||||||
/venv/bin/codecov -X gcov -f coverage.xml
|
|
||||||
|
|
||||||
wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
|
|
||||||
tar xvf buildkite.tar.gz
|
|
||||||
chmod +x ./buildkite-agent
|
|
||||||
|
|
||||||
# Upload the files
|
|
||||||
./buildkite-agent artifact upload "/logs/**/*.log*"
|
|
||||||
./buildkite-agent artifact upload "/logs/results.tap"
|
|
||||||
|
|
||||||
if [ $TEST_STATUS -ne 0 ]; then
|
|
||||||
# Annotate, if failure
|
|
||||||
/venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
exit $TEST_STATUS
|
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
# This file serves as a blacklist for SyTest tests that we expect will fail in
|
||||||
|
# Synapse when run under worker mode. For more details, see sytest-blacklist.
|
||||||
|
|
||||||
|
Message history can be paginated
|
||||||
|
|
||||||
|
Can re-join room if re-invited
|
||||||
|
|
||||||
|
/upgrade creates a new room
|
||||||
|
|
||||||
|
The only membership state included in an initial sync is for all the senders in the timeline
|
||||||
|
|
||||||
|
Local device key changes get to remote servers
|
||||||
|
|
||||||
|
If remote user leaves room we no longer receive device updates
|
||||||
|
|
||||||
|
Forgotten room messages cannot be paginated
|
||||||
|
|
||||||
|
Inbound federation can get public room list
|
||||||
|
|
||||||
|
Members from the gap are included in gappy incr LL sync
|
||||||
|
|
||||||
|
Leaves are present in non-gapped incremental syncs
|
||||||
|
|
||||||
|
Old leaves are present in gapped incremental syncs
|
||||||
|
|
||||||
|
User sees updates to presence from other users in the incremental sync.
|
||||||
|
|
||||||
|
Gapped incremental syncs include all state changes
|
||||||
|
|
||||||
|
Old members are included in gappy incr LL sync if they start speaking
|
||||||
+1
-2
@@ -1,5 +1,4 @@
|
|||||||
comment:
|
comment: off
|
||||||
layout: "diff"
|
|
||||||
|
|
||||||
coverage:
|
coverage:
|
||||||
status:
|
status:
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ _trial_temp*/
|
|||||||
/*.log
|
/*.log
|
||||||
/*.log.config
|
/*.log.config
|
||||||
/*.pid
|
/*.pid
|
||||||
|
/.python-version
|
||||||
/*.signing.key
|
/*.signing.key
|
||||||
/env/
|
/env/
|
||||||
/homeserver*.yaml
|
/homeserver*.yaml
|
||||||
|
|||||||
+251
@@ -1,3 +1,254 @@
|
|||||||
|
Synapse 1.3.1 (2019-08-17)
|
||||||
|
==========================
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Drop hard dependency on `sdnotify` python package. ([\#5871](https://github.com/matrix-org/synapse/issues/5871))
|
||||||
|
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix startup issue (hang on ACME provisioning) due to ordering of Twisted reactor startup. Thanks to @chrismoos for supplying the fix. ([\#5867](https://github.com/matrix-org/synapse/issues/5867))
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.3.0 (2019-08-15)
|
||||||
|
==========================
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix 500 Internal Server Error on `publicRooms` when the public room list was
|
||||||
|
cached. ([\#5851](https://github.com/matrix-org/synapse/issues/5851))
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.3.0rc1 (2019-08-13)
|
||||||
|
=============================
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Use `M_USER_DEACTIVATED` instead of `M_UNKNOWN` for errcode when a deactivated user attempts to login. ([\#5686](https://github.com/matrix-org/synapse/issues/5686))
|
||||||
|
- Add sd_notify hooks to ease systemd integration and allows usage of Type=Notify. ([\#5732](https://github.com/matrix-org/synapse/issues/5732))
|
||||||
|
- Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead. ([\#5754](https://github.com/matrix-org/synapse/issues/5754), [\#5848](https://github.com/matrix-org/synapse/issues/5848))
|
||||||
|
- Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers. ([\#5783](https://github.com/matrix-org/synapse/issues/5783))
|
||||||
|
- Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature. ([\#5807](https://github.com/matrix-org/synapse/issues/5807))
|
||||||
|
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix UISIs during homeserver outage. ([\#5693](https://github.com/matrix-org/synapse/issues/5693), [\#5789](https://github.com/matrix-org/synapse/issues/5789))
|
||||||
|
- Fix stack overflow in server key lookup code. ([\#5724](https://github.com/matrix-org/synapse/issues/5724))
|
||||||
|
- start.sh no longer uses deprecated cli option. ([\#5725](https://github.com/matrix-org/synapse/issues/5725))
|
||||||
|
- Log when we receive an event receipt from an unexpected origin. ([\#5743](https://github.com/matrix-org/synapse/issues/5743))
|
||||||
|
- Fix debian packaging scripts to correctly build sid packages. ([\#5775](https://github.com/matrix-org/synapse/issues/5775))
|
||||||
|
- Correctly handle redactions of redactions. ([\#5788](https://github.com/matrix-org/synapse/issues/5788))
|
||||||
|
- Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions. ([\#5798](https://github.com/matrix-org/synapse/issues/5798))
|
||||||
|
- Fix check that tombstone is a state event in push rules. ([\#5804](https://github.com/matrix-org/synapse/issues/5804))
|
||||||
|
- Fix error when trying to login as a deactivated user when using a worker to handle login. ([\#5806](https://github.com/matrix-org/synapse/issues/5806))
|
||||||
|
- Fix bug where user `/sync` stream could get wedged in rare circumstances. ([\#5825](https://github.com/matrix-org/synapse/issues/5825))
|
||||||
|
- The purge_remote_media.sh script was fixed. ([\#5839](https://github.com/matrix-org/synapse/issues/5839))
|
||||||
|
|
||||||
|
|
||||||
|
Deprecations and Removals
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
- Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration. ([\#5678](https://github.com/matrix-org/synapse/issues/5678), [\#5729](https://github.com/matrix-org/synapse/issues/5729))
|
||||||
|
- Remove non-functional 'expire_access_token' setting. ([\#5782](https://github.com/matrix-org/synapse/issues/5782))
|
||||||
|
|
||||||
|
|
||||||
|
Internal Changes
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- Make Jaeger fully configurable. ([\#5694](https://github.com/matrix-org/synapse/issues/5694))
|
||||||
|
- Add precautionary measures to prevent future abuse of `window.opener` in default welcome page. ([\#5695](https://github.com/matrix-org/synapse/issues/5695))
|
||||||
|
- Reduce database IO usage by optimising queries for current membership. ([\#5706](https://github.com/matrix-org/synapse/issues/5706), [\#5738](https://github.com/matrix-org/synapse/issues/5738), [\#5746](https://github.com/matrix-org/synapse/issues/5746), [\#5752](https://github.com/matrix-org/synapse/issues/5752), [\#5770](https://github.com/matrix-org/synapse/issues/5770), [\#5774](https://github.com/matrix-org/synapse/issues/5774), [\#5792](https://github.com/matrix-org/synapse/issues/5792), [\#5793](https://github.com/matrix-org/synapse/issues/5793))
|
||||||
|
- Improve caching when fetching `get_filtered_current_state_ids`. ([\#5713](https://github.com/matrix-org/synapse/issues/5713))
|
||||||
|
- Don't accept opentracing data from clients. ([\#5715](https://github.com/matrix-org/synapse/issues/5715))
|
||||||
|
- Speed up PostgreSQL unit tests in CI. ([\#5717](https://github.com/matrix-org/synapse/issues/5717))
|
||||||
|
- Update the coding style document. ([\#5719](https://github.com/matrix-org/synapse/issues/5719))
|
||||||
|
- Improve database query performance when recording retry intervals for remote hosts. ([\#5720](https://github.com/matrix-org/synapse/issues/5720))
|
||||||
|
- Add a set of opentracing utils. ([\#5722](https://github.com/matrix-org/synapse/issues/5722))
|
||||||
|
- Cache result of get_version_string to reduce overhead of `/version` federation requests. ([\#5730](https://github.com/matrix-org/synapse/issues/5730))
|
||||||
|
- Return 'user_type' in admin API user endpoints results. ([\#5731](https://github.com/matrix-org/synapse/issues/5731))
|
||||||
|
- Don't package the sytest test blacklist file. ([\#5733](https://github.com/matrix-org/synapse/issues/5733))
|
||||||
|
- Replace uses of returnValue with plain return, as returnValue is not needed on Python 3. ([\#5736](https://github.com/matrix-org/synapse/issues/5736))
|
||||||
|
- Blacklist some flakey tests in worker mode. ([\#5740](https://github.com/matrix-org/synapse/issues/5740))
|
||||||
|
- Fix some error cases in the caching layer. ([\#5749](https://github.com/matrix-org/synapse/issues/5749))
|
||||||
|
- Add a prometheus metric for pending cache lookups. ([\#5750](https://github.com/matrix-org/synapse/issues/5750))
|
||||||
|
- Stop trying to fetch events with event_id=None. ([\#5753](https://github.com/matrix-org/synapse/issues/5753))
|
||||||
|
- Convert RedactionTestCase to modern test style. ([\#5768](https://github.com/matrix-org/synapse/issues/5768))
|
||||||
|
- Allow looping calls to be given arguments. ([\#5780](https://github.com/matrix-org/synapse/issues/5780))
|
||||||
|
- Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO. ([\#5785](https://github.com/matrix-org/synapse/issues/5785))
|
||||||
|
- Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests. ([\#5787](https://github.com/matrix-org/synapse/issues/5787))
|
||||||
|
- Remove some spurious exceptions from the logs where we failed to talk to a remote server. ([\#5790](https://github.com/matrix-org/synapse/issues/5790))
|
||||||
|
- Improve performance when making `.well-known` requests by sharing the SSL options between requests. ([\#5794](https://github.com/matrix-org/synapse/issues/5794))
|
||||||
|
- Disable codecov GitHub comments on PRs. ([\#5796](https://github.com/matrix-org/synapse/issues/5796))
|
||||||
|
- Don't allow clients to send tombstone events that reference the room it's sent in. ([\#5801](https://github.com/matrix-org/synapse/issues/5801))
|
||||||
|
- Deny redactions of events sent in a different room. ([\#5802](https://github.com/matrix-org/synapse/issues/5802))
|
||||||
|
- Deny sending well known state types as non-state events. ([\#5805](https://github.com/matrix-org/synapse/issues/5805))
|
||||||
|
- Handle incorrectly encoded query params correctly by returning a 400. ([\#5808](https://github.com/matrix-org/synapse/issues/5808))
|
||||||
|
- Handle pusher being deleted during processing rather than logging an exception. ([\#5809](https://github.com/matrix-org/synapse/issues/5809))
|
||||||
|
- Return 502 not 500 when failing to reach any remote server. ([\#5810](https://github.com/matrix-org/synapse/issues/5810))
|
||||||
|
- Reduce global pauses in the events stream caused by expensive state resolution during persistence. ([\#5826](https://github.com/matrix-org/synapse/issues/5826))
|
||||||
|
- Add a lower bound to well-known lookup cache time to avoid repeated lookups. ([\#5836](https://github.com/matrix-org/synapse/issues/5836))
|
||||||
|
- Whitelist history visibility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843))
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.2.1 (2019-07-26)
|
||||||
|
==========================
|
||||||
|
|
||||||
|
Security update
|
||||||
|
---------------
|
||||||
|
|
||||||
|
This release includes *four* security fixes:
|
||||||
|
|
||||||
|
- Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767))
|
||||||
|
- Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961))
|
||||||
|
- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @dylangerdaly for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744))
|
||||||
|
- Fix a vulnerability where a federated server could spoof read-receipts from
|
||||||
|
users on other servers. Thanks to @dylangerdaly for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743))
|
||||||
|
|
||||||
|
Additionally, the following fix was in Synapse **1.2.0**, but was not correctly
|
||||||
|
identified during the original release:
|
||||||
|
|
||||||
|
- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
|
||||||
|
|
||||||
|
Synapse 1.2.0 (2019-07-25)
|
||||||
|
==========================
|
||||||
|
|
||||||
|
No significant changes.
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.2.0rc2 (2019-07-24)
|
||||||
|
=============================
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. ([\#5734](https://github.com/matrix-org/synapse/issues/5734))
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.2.0rc1 (2019-07-22)
|
||||||
|
=============================
|
||||||
|
|
||||||
|
Security fixes
|
||||||
|
--------------
|
||||||
|
|
||||||
|
This update included a security fix which was initially incorrectly flagged as
|
||||||
|
a regular bug fix.
|
||||||
|
|
||||||
|
- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Add support for opentracing. ([\#5544](https://github.com/matrix-org/synapse/issues/5544), [\#5712](https://github.com/matrix-org/synapse/issues/5712))
|
||||||
|
- Add ability to pull all locally stored events out of synapse that a particular user can see. ([\#5589](https://github.com/matrix-org/synapse/issues/5589))
|
||||||
|
- Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. ([\#5597](https://github.com/matrix-org/synapse/issues/5597))
|
||||||
|
- Add `sender` and `origin_server_ts` fields to `m.replace`. ([\#5613](https://github.com/matrix-org/synapse/issues/5613))
|
||||||
|
- Add default push rule to ignore reactions. ([\#5623](https://github.com/matrix-org/synapse/issues/5623))
|
||||||
|
- Include the original event when asking for its relations. ([\#5626](https://github.com/matrix-org/synapse/issues/5626))
|
||||||
|
- Implement `session_lifetime` configuration option, after which access tokens will expire. ([\#5660](https://github.com/matrix-org/synapse/issues/5660))
|
||||||
|
- Return "This account has been deactivated" when a deactivated user tries to login. ([\#5674](https://github.com/matrix-org/synapse/issues/5674))
|
||||||
|
- Enable aggregations support by default ([\#5714](https://github.com/matrix-org/synapse/issues/5714))
|
||||||
|
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix 'utime went backwards' errors on daemonization. ([\#5609](https://github.com/matrix-org/synapse/issues/5609))
|
||||||
|
- Various minor fixes to the federation request rate limiter. ([\#5621](https://github.com/matrix-org/synapse/issues/5621))
|
||||||
|
- Forbid viewing relations on an event once it has been redacted. ([\#5629](https://github.com/matrix-org/synapse/issues/5629))
|
||||||
|
- Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. ([\#5638](https://github.com/matrix-org/synapse/issues/5638))
|
||||||
|
- Fix newly-registered users not being able to lookup their own profile without joining a room. ([\#5644](https://github.com/matrix-org/synapse/issues/5644))
|
||||||
|
- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654))
|
||||||
|
- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658))
|
||||||
|
- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707))
|
||||||
|
|
||||||
|
|
||||||
|
Updates to the Docker image
|
||||||
|
---------------------------
|
||||||
|
|
||||||
|
- Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). ([\#5619](https://github.com/matrix-org/synapse/issues/5619))
|
||||||
|
- Add missing space in default logging file format generated by the Docker image. ([\#5620](https://github.com/matrix-org/synapse/issues/5620))
|
||||||
|
|
||||||
|
|
||||||
|
Improved Documentation
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
- Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! ([\#5397](https://github.com/matrix-org/synapse/issues/5397))
|
||||||
|
- --no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. ([\#5651](https://github.com/matrix-org/synapse/issues/5651))
|
||||||
|
- Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! ([\#5661](https://github.com/matrix-org/synapse/issues/5661))
|
||||||
|
- Minor tweaks to postgres documentation. ([\#5675](https://github.com/matrix-org/synapse/issues/5675))
|
||||||
|
|
||||||
|
|
||||||
|
Deprecations and Removals
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
- Remove support for the `invite_3pid_guest` configuration setting. ([\#5625](https://github.com/matrix-org/synapse/issues/5625))
|
||||||
|
|
||||||
|
|
||||||
|
Internal Changes
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- Move logging code out of `synapse.util` and into `synapse.logging`. ([\#5606](https://github.com/matrix-org/synapse/issues/5606), [\#5617](https://github.com/matrix-org/synapse/issues/5617))
|
||||||
|
- Add a blacklist file to the repo to blacklist certain sytests from failing CI. ([\#5611](https://github.com/matrix-org/synapse/issues/5611))
|
||||||
|
- Make runtime errors surrounding password reset emails much clearer. ([\#5616](https://github.com/matrix-org/synapse/issues/5616))
|
||||||
|
- Remove dead code for persiting outgoing federation transactions. ([\#5622](https://github.com/matrix-org/synapse/issues/5622))
|
||||||
|
- Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. ([\#5627](https://github.com/matrix-org/synapse/issues/5627))
|
||||||
|
- Move RegistrationHandler.get_or_create_user to test code. ([\#5628](https://github.com/matrix-org/synapse/issues/5628))
|
||||||
|
- Add some more common python virtual-environment paths to the black exclusion list. ([\#5630](https://github.com/matrix-org/synapse/issues/5630))
|
||||||
|
- Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. ([\#5636](https://github.com/matrix-org/synapse/issues/5636))
|
||||||
|
- Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637))
|
||||||
|
- Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639))
|
||||||
|
- Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640))
|
||||||
|
- Remove access-token support from `RegistrationHandler.register`, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641))
|
||||||
|
- Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642))
|
||||||
|
- Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643))
|
||||||
|
- Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645))
|
||||||
|
- Fix a small typo in a code comment. ([\#5655](https://github.com/matrix-org/synapse/issues/5655))
|
||||||
|
- Clean up exception handling around client access tokens. ([\#5656](https://github.com/matrix-org/synapse/issues/5656))
|
||||||
|
- Add a mechanism for per-test homeserver configuration in the unit tests. ([\#5657](https://github.com/matrix-org/synapse/issues/5657))
|
||||||
|
- Inline issue_access_token. ([\#5659](https://github.com/matrix-org/synapse/issues/5659))
|
||||||
|
- Update the sytest BuildKite configuration to checkout Synapse in `/src`. ([\#5664](https://github.com/matrix-org/synapse/issues/5664))
|
||||||
|
- Add a `docker` type to the towncrier configuration. ([\#5673](https://github.com/matrix-org/synapse/issues/5673))
|
||||||
|
- Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. ([\#5689](https://github.com/matrix-org/synapse/issues/5689))
|
||||||
|
- Documentation for opentracing. ([\#5703](https://github.com/matrix-org/synapse/issues/5703))
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.1.0 (2019-07-04)
|
||||||
|
==========================
|
||||||
|
|
||||||
|
As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
|
||||||
|
See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
|
||||||
|
|
||||||
|
This release also deprecates the use of environment variables to configure the
|
||||||
|
docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
|
||||||
|
for more details.
|
||||||
|
|
||||||
|
No changes since 1.1.0rc2.
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.1.0rc2 (2019-07-03)
|
||||||
|
=============================
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
|
||||||
|
- Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
|
||||||
|
- Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))
|
||||||
|
|
||||||
|
|
||||||
|
Internal Changes
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))
|
||||||
|
|
||||||
|
|
||||||
Synapse 1.1.0rc1 (2019-07-02)
|
Synapse 1.1.0rc1 (2019-07-02)
|
||||||
=============================
|
=============================
|
||||||
|
|
||||||
|
|||||||
+18
-11
@@ -30,11 +30,10 @@ use github's pull request workflow to review the contribution, and either ask
|
|||||||
you to make any refinements needed or merge it and make them ourselves. The
|
you to make any refinements needed or merge it and make them ourselves. The
|
||||||
changes will then land on master when we next do a release.
|
changes will then land on master when we next do a release.
|
||||||
|
|
||||||
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
|
We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
|
||||||
<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
|
continuous integration. Buildkite builds need to be authorised by a
|
||||||
Buildkite builds need to be authorised by a maintainer. If your change breaks
|
maintainer. If your change breaks the build, this will be shown in GitHub, so
|
||||||
the build, this will be shown in GitHub, so please keep an eye on the pull
|
please keep an eye on the pull request for feedback.
|
||||||
request for feedback.
|
|
||||||
|
|
||||||
To run unit tests in a local development environment, you can use:
|
To run unit tests in a local development environment, you can use:
|
||||||
|
|
||||||
@@ -70,13 +69,21 @@ All changes, even minor ones, need a corresponding changelog / newsfragment
|
|||||||
entry. These are managed by Towncrier
|
entry. These are managed by Towncrier
|
||||||
(https://github.com/hawkowl/towncrier).
|
(https://github.com/hawkowl/towncrier).
|
||||||
|
|
||||||
To create a changelog entry, make a new file in the ``changelog.d``
|
To create a changelog entry, make a new file in the ``changelog.d`` file named
|
||||||
file named in the format of ``PRnumber.type``. The type can be
|
in the format of ``PRnumber.type``. The type can be one of the following:
|
||||||
one of ``feature``, ``bugfix``, ``removal`` (also used for
|
|
||||||
deprecations), or ``misc`` (for internal-only changes).
|
|
||||||
|
|
||||||
The content of the file is your changelog entry, which can contain Markdown
|
* ``feature``.
|
||||||
formatting. The entry should end with a full stop ('.') for consistency.
|
* ``bugfix``.
|
||||||
|
* ``docker`` (for updates to the Docker image).
|
||||||
|
* ``doc`` (for updates to the documentation).
|
||||||
|
* ``removal`` (also used for deprecations).
|
||||||
|
* ``misc`` (for internal-only changes).
|
||||||
|
|
||||||
|
The content of the file is your changelog entry, which should be a short
|
||||||
|
description of your change in the same style as the rest of our `changelog
|
||||||
|
<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
|
||||||
|
contain Markdown formatting, and should end with a full stop ('.') for
|
||||||
|
consistency.
|
||||||
|
|
||||||
Adding credits to the changelog is encouraged, we value your
|
Adding credits to the changelog is encouraged, we value your
|
||||||
contributions and would like to have you shouted out in the release notes!
|
contributions and would like to have you shouted out in the release notes!
|
||||||
|
|||||||
+4
-5
@@ -419,12 +419,11 @@ If Synapse is not configured with an SMTP server, password reset via email will
|
|||||||
|
|
||||||
## Registering a user
|
## Registering a user
|
||||||
|
|
||||||
You will need at least one user on your server in order to use a Matrix
|
The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
|
||||||
client. Users can be registered either via a Matrix client, or via a
|
|
||||||
commandline script.
|
|
||||||
|
|
||||||
To get started, it is easiest to use the command line to register new
|
Alternatively you can do so from the command line if you have installed via pip.
|
||||||
users. This can be done as follows:
|
|
||||||
|
This can be done as follows:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ source ~/synapse/env/bin/activate
|
$ source ~/synapse/env/bin/activate
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ exclude Dockerfile
|
|||||||
exclude .dockerignore
|
exclude .dockerignore
|
||||||
exclude test_postgresql.sh
|
exclude test_postgresql.sh
|
||||||
exclude .editorconfig
|
exclude .editorconfig
|
||||||
|
exclude sytest-blacklist
|
||||||
|
|
||||||
include pyproject.toml
|
include pyproject.toml
|
||||||
recursive-include changelog.d *
|
recursive-include changelog.d *
|
||||||
|
|||||||
+1
-1
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
|
|||||||
|
|
||||||
virtualenv -p python3 env
|
virtualenv -p python3 env
|
||||||
source env/bin/activate
|
source env/bin/activate
|
||||||
python -m pip install --no-pep-517 -e .[all]
|
python -m pip install --no-use-pep517 -e .[all]
|
||||||
|
|
||||||
This will run a process of downloading and installing all the needed
|
This will run a process of downloading and installing all the needed
|
||||||
dependencies into a virtual env.
|
dependencies into a virtual env.
|
||||||
|
|||||||
@@ -49,6 +49,13 @@ returned by the Client-Server API:
|
|||||||
# configured on port 443.
|
# configured on port 443.
|
||||||
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
|
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
|
||||||
|
|
||||||
|
Upgrading to v1.2.0
|
||||||
|
===================
|
||||||
|
|
||||||
|
Some counter metrics have been renamed, with the old names deprecated. See
|
||||||
|
`the metrics documentation <docs/metrics-howto.rst#renaming-of-metrics--deprecation-of-old-names-in-12>`_
|
||||||
|
for details.
|
||||||
|
|
||||||
Upgrading to v1.1.0
|
Upgrading to v1.1.0
|
||||||
===================
|
===================
|
||||||
|
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
Update github templates.
|
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add admin API endpoint for setting whether or not a user is a server administrator.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add admin API endpoint for getting whether or not a user is a server administrator.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add config option to increase ratelimits for room admins redacting messages.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Handle userid clashes when authenticating via SAML by appending an integer suffix.
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
# Example log_config file for synapse. To enable, point `log_config` to it in
|
# Example log_config file for synapse. To enable, point `log_config` to it in
|
||||||
# `homeserver.yaml`, and restart synapse.
|
# `homeserver.yaml`, and restart synapse.
|
||||||
#
|
#
|
||||||
# This configuration will produce similar results to the defaults within
|
# This configuration will produce similar results to the defaults within
|
||||||
# synapse, but can be edited to give more flexibility.
|
# synapse, but can be edited to give more flexibility.
|
||||||
|
|
||||||
version: 1
|
version: 1
|
||||||
@@ -12,7 +12,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
@@ -35,7 +35,7 @@ handlers:
|
|||||||
root:
|
root:
|
||||||
level: INFO
|
level: INFO
|
||||||
handlers: [console] # to use file handler instead, switch to [file]
|
handlers: [console] # to use file handler instead, switch to [file]
|
||||||
|
|
||||||
loggers:
|
loggers:
|
||||||
synapse:
|
synapse:
|
||||||
level: INFO
|
level: INFO
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid
|
|||||||
|
|
||||||
from synapse.app.homeserver import SynapseHomeServer
|
from synapse.app.homeserver import SynapseHomeServer
|
||||||
|
|
||||||
# from synapse.util.logutils import log_function
|
# from synapse.logging.utils import log_function
|
||||||
|
|
||||||
from twisted.internet import reactor, defer
|
from twisted.internet import reactor, defer
|
||||||
from twisted.python import log
|
from twisted.python import log
|
||||||
|
|||||||
@@ -51,4 +51,4 @@ TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id
|
|||||||
# finally start pruning media:
|
# finally start pruning media:
|
||||||
###############################################################################
|
###############################################################################
|
||||||
set -x # for debugging the generated string
|
set -x # for debugging the generated string
|
||||||
curl --header "Authorization: Bearer $TOKEN" -v POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
|
curl --header "Authorization: Bearer $TOKEN" -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
|
||||||
|
|||||||
@@ -4,7 +4,8 @@ After=matrix-synapse.service
|
|||||||
BindsTo=matrix-synapse.service
|
BindsTo=matrix-synapse.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=notify
|
||||||
|
NotifyAccess=main
|
||||||
User=matrix-synapse
|
User=matrix-synapse
|
||||||
WorkingDirectory=/var/lib/matrix-synapse
|
WorkingDirectory=/var/lib/matrix-synapse
|
||||||
EnvironmentFile=/etc/default/matrix-synapse
|
EnvironmentFile=/etc/default/matrix-synapse
|
||||||
|
|||||||
@@ -2,7 +2,8 @@
|
|||||||
Description=Synapse Matrix Homeserver
|
Description=Synapse Matrix Homeserver
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=notify
|
||||||
|
NotifyAccess=main
|
||||||
User=matrix-synapse
|
User=matrix-synapse
|
||||||
WorkingDirectory=/var/lib/matrix-synapse
|
WorkingDirectory=/var/lib/matrix-synapse
|
||||||
EnvironmentFile=/etc/default/matrix-synapse
|
EnvironmentFile=/etc/default/matrix-synapse
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
@@ -14,7 +14,9 @@
|
|||||||
Description=Synapse Matrix homeserver
|
Description=Synapse Matrix homeserver
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=notify
|
||||||
|
NotifyAccess=main
|
||||||
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
Restart=on-abort
|
Restart=on-abort
|
||||||
|
|
||||||
User=synapse
|
User=synapse
|
||||||
|
|||||||
Vendored
+34
-2
@@ -1,9 +1,41 @@
|
|||||||
matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium
|
matrix-synapse-py3 (1.3.1) stable; urgency=medium
|
||||||
|
|
||||||
|
* New synapse release 1.3.1.
|
||||||
|
|
||||||
|
-- Synapse Packaging team <packages@matrix.org> Sat, 17 Aug 2019 09:15:49 +0100
|
||||||
|
|
||||||
|
matrix-synapse-py3 (1.3.0) stable; urgency=medium
|
||||||
|
|
||||||
|
[ Andrew Morgan ]
|
||||||
|
* Remove libsqlite3-dev from required build dependencies.
|
||||||
|
|
||||||
|
[ Synapse Packaging team ]
|
||||||
|
* New synapse release 1.3.0.
|
||||||
|
|
||||||
|
-- Synapse Packaging team <packages@matrix.org> Thu, 15 Aug 2019 12:04:23 +0100
|
||||||
|
|
||||||
|
matrix-synapse-py3 (1.2.0) stable; urgency=medium
|
||||||
|
|
||||||
|
[ Amber Brown ]
|
||||||
|
* Update logging config defaults to match API changes in Synapse.
|
||||||
|
|
||||||
|
[ Richard van der Hoff ]
|
||||||
|
* Add Recommends and Depends for some libraries which you probably want.
|
||||||
|
|
||||||
|
[ Synapse Packaging team ]
|
||||||
|
* New synapse release 1.2.0.
|
||||||
|
|
||||||
|
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Jul 2019 14:10:07 +0100
|
||||||
|
|
||||||
|
matrix-synapse-py3 (1.1.0) stable; urgency=medium
|
||||||
|
|
||||||
[ Silke Hofstra ]
|
[ Silke Hofstra ]
|
||||||
* Include systemd-python to allow logging to the systemd journal.
|
* Include systemd-python to allow logging to the systemd journal.
|
||||||
|
|
||||||
-- Silke Hofstra <silke@slxh.eu> Wed, 29 May 2019 09:45:29 +0200
|
[ Synapse Packaging team ]
|
||||||
|
* New synapse release 1.1.0.
|
||||||
|
|
||||||
|
-- Synapse Packaging team <packages@matrix.org> Thu, 04 Jul 2019 11:43:41 +0100
|
||||||
|
|
||||||
matrix-synapse-py3 (1.0.0) stable; urgency=medium
|
matrix-synapse-py3 (1.0.0) stable; urgency=medium
|
||||||
|
|
||||||
|
|||||||
Vendored
+6
@@ -2,10 +2,13 @@ Source: matrix-synapse-py3
|
|||||||
Section: contrib/python
|
Section: contrib/python
|
||||||
Priority: extra
|
Priority: extra
|
||||||
Maintainer: Synapse Packaging team <packages@matrix.org>
|
Maintainer: Synapse Packaging team <packages@matrix.org>
|
||||||
|
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
|
||||||
Build-Depends:
|
Build-Depends:
|
||||||
debhelper (>= 9),
|
debhelper (>= 9),
|
||||||
dh-systemd,
|
dh-systemd,
|
||||||
dh-virtualenv (>= 1.1),
|
dh-virtualenv (>= 1.1),
|
||||||
|
libsystemd-dev,
|
||||||
|
libpq-dev,
|
||||||
lsb-release,
|
lsb-release,
|
||||||
python3-dev,
|
python3-dev,
|
||||||
python3,
|
python3,
|
||||||
@@ -28,9 +31,12 @@ Depends:
|
|||||||
debconf,
|
debconf,
|
||||||
python3-distutils|libpython3-stdlib (<< 3.6),
|
python3-distutils|libpython3-stdlib (<< 3.6),
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
|
${shlibs:Depends},
|
||||||
${synapse:pydepends},
|
${synapse:pydepends},
|
||||||
# some of our scripts use perl, but none of them are important,
|
# some of our scripts use perl, but none of them are important,
|
||||||
# so we put perl:Depends in Suggests rather than Depends.
|
# so we put perl:Depends in Suggests rather than Depends.
|
||||||
|
Recommends:
|
||||||
|
${shlibs1:Recommends},
|
||||||
Suggests:
|
Suggests:
|
||||||
sqlite3,
|
sqlite3,
|
||||||
${perl:Depends},
|
${perl:Depends},
|
||||||
|
|||||||
Vendored
+1
-1
@@ -7,7 +7,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
Vendored
+14
@@ -3,15 +3,29 @@
|
|||||||
# Build Debian package using https://github.com/spotify/dh-virtualenv
|
# Build Debian package using https://github.com/spotify/dh-virtualenv
|
||||||
#
|
#
|
||||||
|
|
||||||
|
# assume we only have one package
|
||||||
|
PACKAGE_NAME:=`dh_listpackages`
|
||||||
|
|
||||||
override_dh_systemd_enable:
|
override_dh_systemd_enable:
|
||||||
dh_systemd_enable --name=matrix-synapse
|
dh_systemd_enable --name=matrix-synapse
|
||||||
|
|
||||||
override_dh_installinit:
|
override_dh_installinit:
|
||||||
dh_installinit --name=matrix-synapse
|
dh_installinit --name=matrix-synapse
|
||||||
|
|
||||||
|
# we don't really want to strip the symbols from our object files.
|
||||||
override_dh_strip:
|
override_dh_strip:
|
||||||
|
|
||||||
override_dh_shlibdeps:
|
override_dh_shlibdeps:
|
||||||
|
# make the postgres package's dependencies a recommendation
|
||||||
|
# rather than a hard dependency.
|
||||||
|
find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
|
||||||
|
xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
|
||||||
|
-pshlibs1 -dRecommends
|
||||||
|
|
||||||
|
# all the other dependencies can be normal 'Depends' requirements,
|
||||||
|
# except for PIL's, which is self-contained and which confuses
|
||||||
|
# dpkg-shlibdeps.
|
||||||
|
dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2
|
||||||
|
|
||||||
override_dh_virtualenv:
|
override_dh_virtualenv:
|
||||||
./debian/build_virtualenv
|
./debian/build_virtualenv
|
||||||
|
|||||||
+3
-4
@@ -29,7 +29,7 @@ for port in 8080 8081 8082; do
|
|||||||
|
|
||||||
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
|
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
|
||||||
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
|
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
|
||||||
|
|
||||||
echo 'enable_registration: true' >> $DIR/etc/$port.config
|
echo 'enable_registration: true' >> $DIR/etc/$port.config
|
||||||
|
|
||||||
# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
|
# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
|
||||||
@@ -43,7 +43,7 @@ for port in 8080 8081 8082; do
|
|||||||
tls: true
|
tls: true
|
||||||
resources:
|
resources:
|
||||||
- names: [client, federation]
|
- names: [client, federation]
|
||||||
|
|
||||||
- port: $port
|
- port: $port
|
||||||
tls: false
|
tls: false
|
||||||
bind_addresses: ['::1', '127.0.0.1']
|
bind_addresses: ['::1', '127.0.0.1']
|
||||||
@@ -68,7 +68,7 @@ for port in 8080 8081 8082; do
|
|||||||
|
|
||||||
# Generate tls keys
|
# Generate tls keys
|
||||||
openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"
|
openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"
|
||||||
|
|
||||||
# Ignore keys from the trusted keys server
|
# Ignore keys from the trusted keys server
|
||||||
echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
|
echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
|
||||||
echo 'trusted_key_servers:' >> $DIR/etc/$port.config
|
echo 'trusted_key_servers:' >> $DIR/etc/$port.config
|
||||||
@@ -120,7 +120,6 @@ for port in 8080 8081 8082; do
|
|||||||
python3 -m synapse.app.homeserver \
|
python3 -m synapse.app.homeserver \
|
||||||
--config-path "$DIR/etc/$port.config" \
|
--config-path "$DIR/etc/$port.config" \
|
||||||
-D \
|
-D \
|
||||||
-vv \
|
|
||||||
|
|
||||||
popd
|
popd
|
||||||
done
|
done
|
||||||
|
|||||||
+2
-2
@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
|
|||||||
###
|
###
|
||||||
### Stage 0: builder
|
### Stage 0: builder
|
||||||
###
|
###
|
||||||
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
|
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
|
||||||
|
|
||||||
# install the OS build deps
|
# install the OS build deps
|
||||||
|
|
||||||
@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
|
|||||||
### Stage 1: runtime
|
### Stage 1: runtime
|
||||||
###
|
###
|
||||||
|
|
||||||
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
|
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10
|
||||||
|
|
||||||
# xmlsec is required for saml support
|
# xmlsec is required for saml support
|
||||||
RUN apk add --no-cache --virtual .runtime_deps \
|
RUN apk add --no-cache --virtual .runtime_deps \
|
||||||
|
|||||||
@@ -42,7 +42,15 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
|
|||||||
###
|
###
|
||||||
FROM ${distro}
|
FROM ${distro}
|
||||||
|
|
||||||
|
# Get the distro we want to pull from as a dynamic build variable
|
||||||
|
# (We need to define it in each build stage)
|
||||||
|
ARG distro=""
|
||||||
|
ENV distro ${distro}
|
||||||
|
|
||||||
# Install the build dependencies
|
# Install the build dependencies
|
||||||
|
#
|
||||||
|
# NB: keep this list in sync with the list of build-deps in debian/control
|
||||||
|
# TODO: it would be nice to do that automatically.
|
||||||
RUN apt-get update -qq -o Acquire::Languages=none \
|
RUN apt-get update -qq -o Acquire::Languages=none \
|
||||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
|
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||||
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
|
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
|
||||||
|
|||||||
@@ -4,7 +4,8 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
DIST=`lsb_release -c -s`
|
# Get the codename from distro env
|
||||||
|
DIST=`cut -d ':' -f2 <<< $distro`
|
||||||
|
|
||||||
# we get a read-only copy of the source: make a writeable copy
|
# we get a read-only copy of the source: make a writeable copy
|
||||||
cp -aT /synapse/source /synapse/build
|
cp -aT /synapse/source /synapse/build
|
||||||
|
|||||||
@@ -207,22 +207,3 @@ perspectives:
|
|||||||
|
|
||||||
password_config:
|
password_config:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
|
||||||
{% if SYNAPSE_SMTP_HOST %}
|
|
||||||
email:
|
|
||||||
enable_notifs: false
|
|
||||||
smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
|
|
||||||
smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
|
|
||||||
smtp_user: "{{ SYNAPSE_SMTP_USER }}"
|
|
||||||
smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
|
|
||||||
require_transport_security: False
|
|
||||||
notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
|
|
||||||
app_name: Matrix
|
|
||||||
# if template_dir is unset, uses the example templates that are part of
|
|
||||||
# the Synapse distribution.
|
|
||||||
#template_dir: res/templates
|
|
||||||
notif_template_html: notif_mail.html
|
|
||||||
notif_template_text: notif_mail.txt
|
|
||||||
notif_for_new_users: True
|
|
||||||
riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
|
|
||||||
{% endif %}
|
|
||||||
|
|||||||
@@ -2,11 +2,11 @@ version: 1
|
|||||||
|
|
||||||
formatters:
|
formatters:
|
||||||
precise:
|
precise:
|
||||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
|
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
@@ -84,3 +84,42 @@ with a body of:
|
|||||||
}
|
}
|
||||||
|
|
||||||
including an ``access_token`` of a server admin.
|
including an ``access_token`` of a server admin.
|
||||||
|
|
||||||
|
|
||||||
|
Get whether a user is a server administrator or not
|
||||||
|
===================================================
|
||||||
|
|
||||||
|
|
||||||
|
The api is::
|
||||||
|
|
||||||
|
GET /_synapse/admin/v1/users/<user_id>/admin
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin.
|
||||||
|
|
||||||
|
A response body like the following is returned:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"admin": true
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Change whether a user is a server administrator or not
|
||||||
|
======================================================
|
||||||
|
|
||||||
|
Note that you cannot demote yourself.
|
||||||
|
|
||||||
|
The api is::
|
||||||
|
|
||||||
|
PUT /_synapse/admin/v1/users/<user_id>/admin
|
||||||
|
|
||||||
|
with a body of:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"admin": true
|
||||||
|
}
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin.
|
||||||
|
|||||||
+99
-35
@@ -1,4 +1,8 @@
|
|||||||
# Code Style
|
Code Style
|
||||||
|
==========
|
||||||
|
|
||||||
|
Formatting tools
|
||||||
|
----------------
|
||||||
|
|
||||||
The Synapse codebase uses a number of code formatting tools in order to
|
The Synapse codebase uses a number of code formatting tools in order to
|
||||||
quickly and automatically check for formatting (and sometimes logical) errors
|
quickly and automatically check for formatting (and sometimes logical) errors
|
||||||
@@ -6,20 +10,20 @@ in code.
|
|||||||
|
|
||||||
The necessary tools are detailed below.
|
The necessary tools are detailed below.
|
||||||
|
|
||||||
## Formatting tools
|
- **black**
|
||||||
|
|
||||||
The Synapse codebase uses [black](https://pypi.org/project/black/) as an
|
The Synapse codebase uses `black <https://pypi.org/project/black/>`_ as an
|
||||||
opinionated code formatter, ensuring all comitted code is properly
|
opinionated code formatter, ensuring all comitted code is properly
|
||||||
formatted.
|
formatted.
|
||||||
|
|
||||||
First install ``black`` with::
|
First install ``black`` with::
|
||||||
|
|
||||||
pip install --upgrade black
|
pip install --upgrade black
|
||||||
|
|
||||||
Have ``black`` auto-format your code (it shouldn't change any
|
Have ``black`` auto-format your code (it shouldn't change any functionality)
|
||||||
functionality) with::
|
with::
|
||||||
|
|
||||||
black . --exclude="\.tox|build|env"
|
black . --exclude="\.tox|build|env"
|
||||||
|
|
||||||
- **flake8**
|
- **flake8**
|
||||||
|
|
||||||
@@ -54,17 +58,16 @@ functionality is supported in your editor for a more convenient development
|
|||||||
workflow. It is not, however, recommended to run ``flake8`` on save as it
|
workflow. It is not, however, recommended to run ``flake8`` on save as it
|
||||||
takes a while and is very resource intensive.
|
takes a while and is very resource intensive.
|
||||||
|
|
||||||
## General rules
|
General rules
|
||||||
|
-------------
|
||||||
|
|
||||||
- **Naming**:
|
- **Naming**:
|
||||||
|
|
||||||
- Use camel case for class and type names
|
- Use camel case for class and type names
|
||||||
- Use underscores for functions and variables.
|
- Use underscores for functions and variables.
|
||||||
|
|
||||||
- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.
|
- **Docstrings**: should follow the `google code style
|
||||||
|
<https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings>`_.
|
||||||
- **Comments**: should follow the `google code style
|
|
||||||
<http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
|
|
||||||
This is so that we can generate documentation with `sphinx
|
This is so that we can generate documentation with `sphinx
|
||||||
<http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
|
<http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
|
||||||
`examples
|
`examples
|
||||||
@@ -73,6 +76,8 @@ takes a while and is very resource intensive.
|
|||||||
|
|
||||||
- **Imports**:
|
- **Imports**:
|
||||||
|
|
||||||
|
- Imports should be sorted by ``isort`` as described above.
|
||||||
|
|
||||||
- Prefer to import classes and functions rather than packages or modules.
|
- Prefer to import classes and functions rather than packages or modules.
|
||||||
|
|
||||||
Example::
|
Example::
|
||||||
@@ -92,25 +97,84 @@ takes a while and is very resource intensive.
|
|||||||
This goes against the advice in the Google style guide, but it means that
|
This goes against the advice in the Google style guide, but it means that
|
||||||
errors in the name are caught early (at import time).
|
errors in the name are caught early (at import time).
|
||||||
|
|
||||||
- Multiple imports from the same package can be combined onto one line::
|
|
||||||
|
|
||||||
from synapse.types import GroupID, RoomID, UserID
|
|
||||||
|
|
||||||
An effort should be made to keep the individual imports in alphabetical
|
|
||||||
order.
|
|
||||||
|
|
||||||
If the list becomes long, wrap it with parentheses and split it over
|
|
||||||
multiple lines.
|
|
||||||
|
|
||||||
- As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
|
|
||||||
imports should be grouped in the following order, with a blank line between
|
|
||||||
each group:
|
|
||||||
|
|
||||||
1. standard library imports
|
|
||||||
2. related third party imports
|
|
||||||
3. local application/library specific imports
|
|
||||||
|
|
||||||
- Imports within each group should be sorted alphabetically by module name.
|
|
||||||
|
|
||||||
- Avoid wildcard imports (``from synapse.types import *``) and relative
|
- Avoid wildcard imports (``from synapse.types import *``) and relative
|
||||||
imports (``from .types import UserID``).
|
imports (``from .types import UserID``).
|
||||||
|
|
||||||
|
Configuration file format
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
The `sample configuration file <./sample_config.yaml>`_ acts as a reference to
|
||||||
|
Synapse's configuration options for server administrators. Remember that many
|
||||||
|
readers will be unfamiliar with YAML and server administration in general, so
|
||||||
|
that it is important that the file be as easy to understand as possible, which
|
||||||
|
includes following a consistent format.
|
||||||
|
|
||||||
|
Some guidelines follow:
|
||||||
|
|
||||||
|
* Sections should be separated with a heading consisting of a single line
|
||||||
|
prefixed and suffixed with ``##``. There should be **two** blank lines
|
||||||
|
before the section header, and **one** after.
|
||||||
|
|
||||||
|
* Each option should be listed in the file with the following format:
|
||||||
|
|
||||||
|
* A comment describing the setting. Each line of this comment should be
|
||||||
|
prefixed with a hash (``#``) and a space.
|
||||||
|
|
||||||
|
The comment should describe the default behaviour (ie, what happens if
|
||||||
|
the setting is omitted), as well as what the effect will be if the
|
||||||
|
setting is changed.
|
||||||
|
|
||||||
|
Often, the comment end with something like "uncomment the
|
||||||
|
following to \<do action>".
|
||||||
|
|
||||||
|
* A line consisting of only ``#``.
|
||||||
|
|
||||||
|
* A commented-out example setting, prefixed with only ``#``.
|
||||||
|
|
||||||
|
For boolean (on/off) options, convention is that this example should be
|
||||||
|
the *opposite* to the default (so the comment will end with "Uncomment
|
||||||
|
the following to enable [or disable] \<feature\>." For other options,
|
||||||
|
the example should give some non-default value which is likely to be
|
||||||
|
useful to the reader.
|
||||||
|
|
||||||
|
* There should be a blank line between each option.
|
||||||
|
|
||||||
|
* Where several settings are grouped into a single dict, *avoid* the
|
||||||
|
convention where the whole block is commented out, resulting in comment
|
||||||
|
lines starting ``# #``, as this is hard to read and confusing to
|
||||||
|
edit. Instead, leave the top-level config option uncommented, and follow
|
||||||
|
the conventions above for sub-options. Ensure that your code correctly
|
||||||
|
handles the top-level option being set to ``None`` (as it will be if no
|
||||||
|
sub-options are enabled).
|
||||||
|
|
||||||
|
* Lines should be wrapped at 80 characters.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
## Frobnication ##
|
||||||
|
|
||||||
|
# The frobnicator will ensure that all requests are fully frobnicated.
|
||||||
|
# To enable it, uncomment the following.
|
||||||
|
#
|
||||||
|
#frobnicator_enabled: true
|
||||||
|
|
||||||
|
# By default, the frobnicator will frobnicate with the default frobber.
|
||||||
|
# The following will make it use an alternative frobber.
|
||||||
|
#
|
||||||
|
#frobincator_frobber: special_frobber
|
||||||
|
|
||||||
|
# Settings for the frobber
|
||||||
|
#
|
||||||
|
frobber:
|
||||||
|
# frobbing speed. Defaults to 1.
|
||||||
|
#
|
||||||
|
#speed: 10
|
||||||
|
|
||||||
|
# frobbing distance. Defaults to 1000.
|
||||||
|
#
|
||||||
|
#distance: 100
|
||||||
|
|
||||||
|
Note that the sample configuration is generated from the synapse code and is
|
||||||
|
maintained by a script, ``scripts-dev/generate_sample_config``. Making sure
|
||||||
|
that the output from this script matches the desired format is left as an
|
||||||
|
exercise for the reader!
|
||||||
|
|||||||
+20
-20
@@ -1,4 +1,4 @@
|
|||||||
Log contexts
|
Log Contexts
|
||||||
============
|
============
|
||||||
|
|
||||||
.. contents::
|
.. contents::
|
||||||
@@ -12,7 +12,7 @@ record.
|
|||||||
Logcontexts are also used for CPU and database accounting, so that we can track
|
Logcontexts are also used for CPU and database accounting, so that we can track
|
||||||
which requests were responsible for high CPU use or database activity.
|
which requests were responsible for high CPU use or database activity.
|
||||||
|
|
||||||
The ``synapse.util.logcontext`` module provides a facilities for managing the
|
The ``synapse.logging.context`` module provides a facilities for managing the
|
||||||
current log context (as well as providing the ``LoggingContextFilter`` class).
|
current log context (as well as providing the ``LoggingContextFilter`` class).
|
||||||
|
|
||||||
Deferreds make the whole thing complicated, so this document describes how it
|
Deferreds make the whole thing complicated, so this document describes how it
|
||||||
@@ -27,19 +27,19 @@ found them:
|
|||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
from synapse.util import logcontext # omitted from future snippets
|
from synapse.logging import context # omitted from future snippets
|
||||||
|
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
request_context = logcontext.LoggingContext()
|
request_context = context.LoggingContext()
|
||||||
|
|
||||||
calling_context = logcontext.LoggingContext.current_context()
|
calling_context = context.LoggingContext.current_context()
|
||||||
logcontext.LoggingContext.set_current_context(request_context)
|
context.LoggingContext.set_current_context(request_context)
|
||||||
try:
|
try:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
do_request_handling()
|
do_request_handling()
|
||||||
logger.debug("finished")
|
logger.debug("finished")
|
||||||
finally:
|
finally:
|
||||||
logcontext.LoggingContext.set_current_context(calling_context)
|
context.LoggingContext.set_current_context(calling_context)
|
||||||
|
|
||||||
def do_request_handling():
|
def do_request_handling():
|
||||||
logger.debug("phew") # this will be logged against request_id
|
logger.debug("phew") # this will be logged against request_id
|
||||||
@@ -51,7 +51,7 @@ written much more succinctly as:
|
|||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
with logcontext.LoggingContext() as request_context:
|
with context.LoggingContext() as request_context:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
do_request_handling()
|
do_request_handling()
|
||||||
logger.debug("finished")
|
logger.debug("finished")
|
||||||
@@ -74,7 +74,7 @@ blocking operation, and returns a deferred:
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
with logcontext.LoggingContext() as request_context:
|
with context.LoggingContext() as request_context:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
yield do_request_handling()
|
yield do_request_handling()
|
||||||
logger.debug("finished")
|
logger.debug("finished")
|
||||||
@@ -148,7 +148,7 @@ call any other functions.
|
|||||||
d = more_stuff()
|
d = more_stuff()
|
||||||
result = yield d # also fine, of course
|
result = yield d # also fine, of course
|
||||||
|
|
||||||
defer.returnValue(result)
|
return result
|
||||||
|
|
||||||
def nonInlineCallbacksFun():
|
def nonInlineCallbacksFun():
|
||||||
logger.debug("just a wrapper really")
|
logger.debug("just a wrapper really")
|
||||||
@@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from
|
|||||||
external code. We need to make it follow our rules.
|
external code. We need to make it follow our rules.
|
||||||
|
|
||||||
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
|
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
|
||||||
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
|
``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
|
||||||
which returns a deferred which will run its callbacks after a given number of
|
which returns a deferred which will run its callbacks after a given number of
|
||||||
seconds. That might look like:
|
seconds. That might look like:
|
||||||
|
|
||||||
@@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with
|
|||||||
This technique works equally for external functions which return deferreds,
|
This technique works equally for external functions which return deferreds,
|
||||||
or deferreds we have made ourselves.
|
or deferreds we have made ourselves.
|
||||||
|
|
||||||
You can also use ``logcontext.make_deferred_yieldable``, which just does the
|
You can also use ``context.make_deferred_yieldable``, which just does the
|
||||||
boilerplate for you, so the above could be written:
|
boilerplate for you, so the above could be written:
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
def sleep(seconds):
|
def sleep(seconds):
|
||||||
return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
|
return context.make_deferred_yieldable(get_sleep_deferred(seconds))
|
||||||
|
|
||||||
|
|
||||||
Fire-and-forget
|
Fire-and-forget
|
||||||
@@ -279,7 +279,7 @@ Obviously that option means that the operations done in
|
|||||||
that might be fixed by setting a different logcontext via a ``with
|
that might be fixed by setting a different logcontext via a ``with
|
||||||
LoggingContext(...)`` in ``background_operation``).
|
LoggingContext(...)`` in ``background_operation``).
|
||||||
|
|
||||||
The second option is to use ``logcontext.run_in_background``, which wraps a
|
The second option is to use ``context.run_in_background``, which wraps a
|
||||||
function so that it doesn't reset the logcontext even when it returns an
|
function so that it doesn't reset the logcontext even when it returns an
|
||||||
incomplete deferred, and adds a callback to the returned deferred to reset the
|
incomplete deferred, and adds a callback to the returned deferred to reset the
|
||||||
logcontext. In other words, it turns a function that follows the Synapse rules
|
logcontext. In other words, it turns a function that follows the Synapse rules
|
||||||
@@ -293,7 +293,7 @@ It can be used like this:
|
|||||||
def do_request_handling():
|
def do_request_handling():
|
||||||
yield foreground_operation()
|
yield foreground_operation()
|
||||||
|
|
||||||
logcontext.run_in_background(background_operation)
|
context.run_in_background(background_operation)
|
||||||
|
|
||||||
# this will now be logged against the request context
|
# this will now be logged against the request context
|
||||||
logger.debug("Request handling complete")
|
logger.debug("Request handling complete")
|
||||||
@@ -332,7 +332,7 @@ gathered:
|
|||||||
result = yield defer.gatherResults([d1, d2])
|
result = yield defer.gatherResults([d1, d2])
|
||||||
|
|
||||||
In this case particularly, though, option two, of using
|
In this case particularly, though, option two, of using
|
||||||
``logcontext.preserve_fn`` almost certainly makes more sense, so that
|
``context.preserve_fn`` almost certainly makes more sense, so that
|
||||||
``operation1`` and ``operation2`` are both logged against the original
|
``operation1`` and ``operation2`` are both logged against the original
|
||||||
logcontext. This looks like:
|
logcontext. This looks like:
|
||||||
|
|
||||||
@@ -340,8 +340,8 @@ logcontext. This looks like:
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def do_request_handling():
|
def do_request_handling():
|
||||||
d1 = logcontext.preserve_fn(operation1)()
|
d1 = context.preserve_fn(operation1)()
|
||||||
d2 = logcontext.preserve_fn(operation2)()
|
d2 = context.preserve_fn(operation2)()
|
||||||
|
|
||||||
with PreserveLoggingContext():
|
with PreserveLoggingContext():
|
||||||
result = yield defer.gatherResults([d1, d2])
|
result = yield defer.gatherResults([d1, d2])
|
||||||
@@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it:
|
|||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
with logcontext.LoggingContext() as request_context:
|
with context.LoggingContext() as request_context:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
d = do_request_handling()
|
d = do_request_handling()
|
||||||
|
|
||||||
@@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy.
|
|||||||
|
|
||||||
The business of a Deferred which runs its callbacks in the original logcontext
|
The business of a Deferred which runs its callbacks in the original logcontext
|
||||||
isn't hard to achieve — we have it today, in the shape of
|
isn't hard to achieve — we have it today, in the shape of
|
||||||
``logcontext._PreservingContextDeferred``:
|
``context._PreservingContextDeferred``:
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
|
|||||||
@@ -59,6 +59,108 @@ How to monitor Synapse metrics using Prometheus
|
|||||||
Restart Prometheus.
|
Restart Prometheus.
|
||||||
|
|
||||||
|
|
||||||
|
Renaming of metrics & deprecation of old names in 1.2
|
||||||
|
-----------------------------------------------------
|
||||||
|
|
||||||
|
Synapse 1.2 updates the Prometheus metrics to match the naming convention of the
|
||||||
|
upstream ``prometheus_client``. The old names are considered deprecated and will
|
||||||
|
be removed in a future version of Synapse.
|
||||||
|
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| New Name | Old Name |
|
||||||
|
+=============================================================================+=======================================================================+
|
||||||
|
| python_gc_objects_collected_total | python_gc_objects_collected |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| python_gc_collections_total | python_gc_collections |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| process_cpu_seconds_total | process_cpu_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_background_process_start_count_total | synapse_background_process_start_count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_server_received_edus_total | synapse_federation_server_received_edus |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_notifier_notified_events_total | synapse_notifier_notified_events |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
|
||||||
|
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
|
||||||
|
|
||||||
|
|
||||||
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
|
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
|
||||||
---------------------------------------------------------------------------------
|
---------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,100 @@
|
|||||||
|
===========
|
||||||
|
OpenTracing
|
||||||
|
===========
|
||||||
|
|
||||||
|
Background
|
||||||
|
----------
|
||||||
|
|
||||||
|
OpenTracing is a semi-standard being adopted by a number of distributed tracing
|
||||||
|
platforms. It is a common api for facilitating vendor-agnostic tracing
|
||||||
|
instrumentation. That is, we can use the OpenTracing api and select one of a
|
||||||
|
number of tracer implementations to do the heavy lifting in the background.
|
||||||
|
Our current selected implementation is Jaeger.
|
||||||
|
|
||||||
|
OpenTracing is a tool which gives an insight into the causal relationship of
|
||||||
|
work done in and between servers. The servers each track events and report them
|
||||||
|
to a centralised server - in Synapse's case: Jaeger. The basic unit used to
|
||||||
|
represent events is the span. The span roughly represents a single piece of work
|
||||||
|
that was done and the time at which it occurred. A span can have child spans,
|
||||||
|
meaning that the work of the child had to be completed for the parent span to
|
||||||
|
complete, or it can have follow-on spans which represent work that is undertaken
|
||||||
|
as a result of the parent but is not depended on by the parent in order to
|
||||||
|
finish.
|
||||||
|
|
||||||
|
Since this is undertaken in a distributed environment a request to another
|
||||||
|
server, such as an RPC or a simple GET, can be considered a span (a unit of
|
||||||
|
work) for the local server. This causal link is what OpenTracing aims to
|
||||||
|
capture and visualise. In order to do this metadata about the local server's
|
||||||
|
span, i.e. the 'span context', needs to be included with the request to the
|
||||||
|
remote.
|
||||||
|
|
||||||
|
It is up to the remote server to decide what it does with the spans
|
||||||
|
it creates. This is called the sampling policy and it can be configured
|
||||||
|
through Jaeger's settings.
|
||||||
|
|
||||||
|
For OpenTracing concepts see
|
||||||
|
https://opentracing.io/docs/overview/what-is-tracing/.
|
||||||
|
|
||||||
|
For more information about Jaeger's implementation see
|
||||||
|
https://www.jaegertracing.io/docs/
|
||||||
|
|
||||||
|
======================
|
||||||
|
Setting up OpenTracing
|
||||||
|
======================
|
||||||
|
|
||||||
|
To receive OpenTracing spans, start up a Jaeger server. This can be done
|
||||||
|
using docker like so:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
docker run -d --name jaeger
|
||||||
|
-p 6831:6831/udp \
|
||||||
|
-p 6832:6832/udp \
|
||||||
|
-p 5778:5778 \
|
||||||
|
-p 16686:16686 \
|
||||||
|
-p 14268:14268 \
|
||||||
|
jaegertracing/all-in-one:1.13
|
||||||
|
|
||||||
|
Latest documentation is probably at
|
||||||
|
https://www.jaegertracing.io/docs/1.13/getting-started/
|
||||||
|
|
||||||
|
|
||||||
|
Enable OpenTracing in Synapse
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
|
OpenTracing is not enabled by default. It must be enabled in the homeserver
|
||||||
|
config by uncommenting the config options under ``opentracing`` as shown in
|
||||||
|
the `sample config <./sample_config.yaml>`_. For example:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
opentracing:
|
||||||
|
tracer_enabled: true
|
||||||
|
homeserver_whitelist:
|
||||||
|
- "mytrustedhomeserver.org"
|
||||||
|
- "*.myotherhomeservers.com"
|
||||||
|
|
||||||
|
Homeserver whitelisting
|
||||||
|
-----------------------
|
||||||
|
|
||||||
|
The homeserver whitelist is configured using regular expressions. A list of regular
|
||||||
|
expressions can be given and their union will be compared when propagating any
|
||||||
|
span contexts to another homeserver.
|
||||||
|
|
||||||
|
Though it's mostly safe to send and receive span contexts to and from
|
||||||
|
untrusted users since span contexts are usually opaque ids it can lead to
|
||||||
|
two problems, namely:
|
||||||
|
|
||||||
|
- If the span context is marked as sampled by the sending homeserver the receiver will
|
||||||
|
sample it. Therefore two homeservers with wildly different sampling policies
|
||||||
|
could incur higher sampling counts than intended.
|
||||||
|
- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety this has been disabled in Synapse
|
||||||
|
but that doesn't prevent another server sending you baggage which will be logged
|
||||||
|
to OpenTracing's logs.
|
||||||
|
|
||||||
|
==================
|
||||||
|
Configuring Jaeger
|
||||||
|
==================
|
||||||
|
|
||||||
|
Sampling strategies can be set as in this document:
|
||||||
|
https://www.jaegertracing.io/docs/1.13/sampling/
|
||||||
+18
-5
@@ -11,7 +11,9 @@ a postgres database.
|
|||||||
|
|
||||||
* If you are using the `matrix.org debian/ubuntu
|
* If you are using the `matrix.org debian/ubuntu
|
||||||
packages <../INSTALL.md#matrixorg-packages>`_,
|
packages <../INSTALL.md#matrixorg-packages>`_,
|
||||||
the necessary libraries will already be installed.
|
the necessary python library will already be installed, but you will need to
|
||||||
|
ensure the low-level postgres library is installed, which you can do with
|
||||||
|
``apt install libpq5``.
|
||||||
|
|
||||||
* For other pre-built packages, please consult the documentation from the
|
* For other pre-built packages, please consult the documentation from the
|
||||||
relevant package.
|
relevant package.
|
||||||
@@ -34,9 +36,14 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user
|
|||||||
su - postgres
|
su - postgres
|
||||||
createuser --pwprompt synapse_user
|
createuser --pwprompt synapse_user
|
||||||
|
|
||||||
The PostgreSQL database used *must* have the correct encoding set, otherwise it
|
Before you can authenticate with the ``synapse_user``, you must create a
|
||||||
would not be able to store UTF8 strings. To create a database with the correct
|
database that it can access. To create a database, first connect to the database
|
||||||
encoding use, e.g.::
|
with your database user::
|
||||||
|
|
||||||
|
su - postgres
|
||||||
|
psql
|
||||||
|
|
||||||
|
and then run::
|
||||||
|
|
||||||
CREATE DATABASE synapse
|
CREATE DATABASE synapse
|
||||||
ENCODING 'UTF8'
|
ENCODING 'UTF8'
|
||||||
@@ -46,7 +53,13 @@ encoding use, e.g.::
|
|||||||
OWNER synapse_user;
|
OWNER synapse_user;
|
||||||
|
|
||||||
This would create an appropriate database named ``synapse`` owned by the
|
This would create an appropriate database named ``synapse`` owned by the
|
||||||
``synapse_user`` user (which must already exist).
|
``synapse_user`` user (which must already have been created as above).
|
||||||
|
|
||||||
|
Note that the PostgreSQL database *must* have the correct encoding set (as
|
||||||
|
shown above), otherwise it will not be able to store UTF8 strings.
|
||||||
|
|
||||||
|
You may need to enable password authentication so ``synapse_user`` can connect
|
||||||
|
to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html.
|
||||||
|
|
||||||
Tuning Postgres
|
Tuning Postgres
|
||||||
===============
|
===============
|
||||||
|
|||||||
@@ -48,6 +48,8 @@ Let's assume that we expect clients to connect to our server at
|
|||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI.
|
||||||
|
|
||||||
* Caddy::
|
* Caddy::
|
||||||
|
|
||||||
|
|||||||
+177
-55
@@ -278,6 +278,23 @@ listeners:
|
|||||||
# Used by phonehome stats to group together related servers.
|
# Used by phonehome stats to group together related servers.
|
||||||
#server_context: context
|
#server_context: context
|
||||||
|
|
||||||
|
# Resource-constrained Homeserver Settings
|
||||||
|
#
|
||||||
|
# If limit_remote_rooms.enabled is True, the room complexity will be
|
||||||
|
# checked before a user joins a new remote room. If it is above
|
||||||
|
# limit_remote_rooms.complexity, it will disallow joining or
|
||||||
|
# instantly leave.
|
||||||
|
#
|
||||||
|
# limit_remote_rooms.complexity_error can be set to customise the text
|
||||||
|
# displayed to the user when a room above the complexity threshold has
|
||||||
|
# its join cancelled.
|
||||||
|
#
|
||||||
|
# Uncomment the below lines to enable:
|
||||||
|
#limit_remote_rooms:
|
||||||
|
# enabled: True
|
||||||
|
# complexity: 1.0
|
||||||
|
# complexity_error: "This room is too complex."
|
||||||
|
|
||||||
# Whether to require a user to be in the room to add an alias to it.
|
# Whether to require a user to be in the room to add an alias to it.
|
||||||
# Defaults to 'true'.
|
# Defaults to 'true'.
|
||||||
#
|
#
|
||||||
@@ -493,6 +510,9 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
|||||||
# - one for login that ratelimits login requests based on the account the
|
# - one for login that ratelimits login requests based on the account the
|
||||||
# client is attempting to log into, based on the amount of failed login
|
# client is attempting to log into, based on the amount of failed login
|
||||||
# attempts for this account.
|
# attempts for this account.
|
||||||
|
# - one for ratelimiting redactions by room admins. If this is not explicitly
|
||||||
|
# set then it uses the same ratelimiting as per rc_message. This is useful
|
||||||
|
# to allow room admins to deal with abuse quickly.
|
||||||
#
|
#
|
||||||
# The defaults are as shown below.
|
# The defaults are as shown below.
|
||||||
#
|
#
|
||||||
@@ -514,6 +534,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
|||||||
# failed_attempts:
|
# failed_attempts:
|
||||||
# per_second: 0.17
|
# per_second: 0.17
|
||||||
# burst_count: 3
|
# burst_count: 3
|
||||||
|
#
|
||||||
|
#rc_admin_redaction:
|
||||||
|
# per_second: 1
|
||||||
|
# burst_count: 50
|
||||||
|
|
||||||
|
|
||||||
# Ratelimiting settings for incoming federation
|
# Ratelimiting settings for incoming federation
|
||||||
@@ -548,6 +572,13 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Media Store ##
|
||||||
|
|
||||||
|
# Enable the media store service in the Synapse master. Uncomment the
|
||||||
|
# following if you are using a separate media store worker.
|
||||||
|
#
|
||||||
|
#enable_media_repo: false
|
||||||
|
|
||||||
# Directory where uploaded images and attachments are stored.
|
# Directory where uploaded images and attachments are stored.
|
||||||
#
|
#
|
||||||
media_store_path: "DATADIR/media_store"
|
media_store_path: "DATADIR/media_store"
|
||||||
@@ -785,6 +816,27 @@ uploads_path: "DATADIR/uploads"
|
|||||||
# period: 6w
|
# period: 6w
|
||||||
# renew_at: 1w
|
# renew_at: 1w
|
||||||
# renew_email_subject: "Renew your %(app)s account"
|
# renew_email_subject: "Renew your %(app)s account"
|
||||||
|
# # Directory in which Synapse will try to find the HTML files to serve to the
|
||||||
|
# # user when trying to renew an account. Optional, defaults to
|
||||||
|
# # synapse/res/templates.
|
||||||
|
# template_dir: "res/templates"
|
||||||
|
# # HTML to be displayed to the user after they successfully renewed their
|
||||||
|
# # account. Optional.
|
||||||
|
# account_renewed_html_path: "account_renewed.html"
|
||||||
|
# # HTML to be displayed when the user tries to renew an account with an invalid
|
||||||
|
# # renewal token. Optional.
|
||||||
|
# invalid_token_html_path: "invalid_token.html"
|
||||||
|
|
||||||
|
# Time that a user's session remains valid for, after they log in.
|
||||||
|
#
|
||||||
|
# Note that this is not currently compatible with guest logins.
|
||||||
|
#
|
||||||
|
# Note also that this is calculated at login time: changes are not applied
|
||||||
|
# retrospectively to users who have already logged in.
|
||||||
|
#
|
||||||
|
# By default, this is infinite.
|
||||||
|
#
|
||||||
|
#session_lifetime: 24h
|
||||||
|
|
||||||
# The user must provide all of the below types of 3PID when registering.
|
# The user must provide all of the below types of 3PID when registering.
|
||||||
#
|
#
|
||||||
@@ -914,10 +966,6 @@ uploads_path: "DATADIR/uploads"
|
|||||||
#
|
#
|
||||||
# macaroon_secret_key: <PRIVATE STRING>
|
# macaroon_secret_key: <PRIVATE STRING>
|
||||||
|
|
||||||
# Used to enable access token expiration.
|
|
||||||
#
|
|
||||||
#expire_access_token: False
|
|
||||||
|
|
||||||
# a secret which is used to calculate HMACs for form values, to stop
|
# a secret which is used to calculate HMACs for form values, to stop
|
||||||
# falsification of values. Must be specified for the User Consent
|
# falsification of values. Must be specified for the User Consent
|
||||||
# forms to work.
|
# forms to work.
|
||||||
@@ -990,12 +1038,13 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
|
|||||||
|
|
||||||
# Enable SAML2 for registration and login. Uses pysaml2.
|
# Enable SAML2 for registration and login. Uses pysaml2.
|
||||||
#
|
#
|
||||||
# `sp_config` is the configuration for the pysaml2 Service Provider.
|
# At least one of `sp_config` or `config_path` must be set in this section to
|
||||||
# See pysaml2 docs for format of config.
|
# enable SAML login.
|
||||||
#
|
#
|
||||||
# Default values will be used for the 'entityid' and 'service' settings,
|
# (You will probably also want to set the following options to `false` to
|
||||||
# so it is not normally necessary to specify them unless you need to
|
# disable the regular login/registration flows:
|
||||||
# override them.
|
# * enable_registration
|
||||||
|
# * password_config.enabled
|
||||||
#
|
#
|
||||||
# Once SAML support is enabled, a metadata file will be exposed at
|
# Once SAML support is enabled, a metadata file will be exposed at
|
||||||
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
|
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
|
||||||
@@ -1003,52 +1052,85 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
|
|||||||
# the IdP to use an ACS location of
|
# the IdP to use an ACS location of
|
||||||
# https://<server>:<port>/_matrix/saml2/authn_response.
|
# https://<server>:<port>/_matrix/saml2/authn_response.
|
||||||
#
|
#
|
||||||
#saml2_config:
|
saml2_config:
|
||||||
# sp_config:
|
# `sp_config` is the configuration for the pysaml2 Service Provider.
|
||||||
# # point this to the IdP's metadata. You can use either a local file or
|
# See pysaml2 docs for format of config.
|
||||||
# # (preferably) a URL.
|
#
|
||||||
# metadata:
|
# Default values will be used for the 'entityid' and 'service' settings,
|
||||||
# #local: ["saml2/idp.xml"]
|
# so it is not normally necessary to specify them unless you need to
|
||||||
# remote:
|
# override them.
|
||||||
# - url: https://our_idp/metadata.xml
|
#
|
||||||
#
|
#sp_config:
|
||||||
# # By default, the user has to go to our login page first. If you'd like to
|
# # point this to the IdP's metadata. You can use either a local file or
|
||||||
# # allow IdP-initiated login, set 'allow_unsolicited: True' in a
|
# # (preferably) a URL.
|
||||||
# # 'service.sp' section:
|
# metadata:
|
||||||
# #
|
# #local: ["saml2/idp.xml"]
|
||||||
# #service:
|
# remote:
|
||||||
# # sp:
|
# - url: https://our_idp/metadata.xml
|
||||||
# # allow_unsolicited: True
|
#
|
||||||
#
|
# # By default, the user has to go to our login page first. If you'd like
|
||||||
# # The examples below are just used to generate our metadata xml, and you
|
# # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
|
||||||
# # may well not need it, depending on your setup. Alternatively you
|
# # 'service.sp' section:
|
||||||
# # may need a whole lot more detail - see the pysaml2 docs!
|
# #
|
||||||
#
|
# #service:
|
||||||
# description: ["My awesome SP", "en"]
|
# # sp:
|
||||||
# name: ["Test SP", "en"]
|
# # allow_unsolicited: true
|
||||||
#
|
#
|
||||||
# organization:
|
# # The examples below are just used to generate our metadata xml, and you
|
||||||
# name: Example com
|
# # may well not need them, depending on your setup. Alternatively you
|
||||||
# display_name:
|
# # may need a whole lot more detail - see the pysaml2 docs!
|
||||||
# - ["Example co", "en"]
|
#
|
||||||
# url: "http://example.com"
|
# description: ["My awesome SP", "en"]
|
||||||
#
|
# name: ["Test SP", "en"]
|
||||||
# contact_person:
|
#
|
||||||
# - given_name: Bob
|
# organization:
|
||||||
# sur_name: "the Sysadmin"
|
# name: Example com
|
||||||
# email_address": ["admin@example.com"]
|
# display_name:
|
||||||
# contact_type": technical
|
# - ["Example co", "en"]
|
||||||
#
|
# url: "http://example.com"
|
||||||
# # Instead of putting the config inline as above, you can specify a
|
#
|
||||||
# # separate pysaml2 configuration file:
|
# contact_person:
|
||||||
# #
|
# - given_name: Bob
|
||||||
# config_path: "CONFDIR/sp_conf.py"
|
# sur_name: "the Sysadmin"
|
||||||
#
|
# email_address": ["admin@example.com"]
|
||||||
# # the lifetime of a SAML session. This defines how long a user has to
|
# contact_type": technical
|
||||||
# # complete the authentication process, if allow_unsolicited is unset.
|
|
||||||
# # The default is 5 minutes.
|
# Instead of putting the config inline as above, you can specify a
|
||||||
# #
|
# separate pysaml2 configuration file:
|
||||||
# # saml_session_lifetime: 5m
|
#
|
||||||
|
#config_path: "CONFDIR/sp_conf.py"
|
||||||
|
|
||||||
|
# the lifetime of a SAML session. This defines how long a user has to
|
||||||
|
# complete the authentication process, if allow_unsolicited is unset.
|
||||||
|
# The default is 5 minutes.
|
||||||
|
#
|
||||||
|
#saml_session_lifetime: 5m
|
||||||
|
|
||||||
|
# The SAML attribute (after mapping via the attribute maps) to use to derive
|
||||||
|
# the Matrix ID from. 'uid' by default.
|
||||||
|
#
|
||||||
|
#mxid_source_attribute: displayName
|
||||||
|
|
||||||
|
# The mapping system to use for mapping the saml attribute onto a matrix ID.
|
||||||
|
# Options include:
|
||||||
|
# * 'hexencode' (which maps unpermitted characters to '=xx')
|
||||||
|
# * 'dotreplace' (which replaces unpermitted characters with '.').
|
||||||
|
# The default is 'hexencode'.
|
||||||
|
#
|
||||||
|
#mxid_mapping: dotreplace
|
||||||
|
|
||||||
|
# In previous versions of synapse, the mapping from SAML attribute to MXID was
|
||||||
|
# always calculated dynamically rather than stored in a table. For backwards-
|
||||||
|
# compatibility, we will look for user_ids matching such a pattern before
|
||||||
|
# creating a new account.
|
||||||
|
#
|
||||||
|
# This setting controls the SAML attribute which will be used for this
|
||||||
|
# backwards-compatibility lookup. Typically it should be 'uid', but if the
|
||||||
|
# attribute maps are changed, it may be necessary to change it.
|
||||||
|
#
|
||||||
|
# The default is 'uid'.
|
||||||
|
#
|
||||||
|
#grandfathered_mxid_source_attribute: upn
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1395,3 +1477,43 @@ password_config:
|
|||||||
# module: "my_custom_project.SuperRulesSet"
|
# module: "my_custom_project.SuperRulesSet"
|
||||||
# config:
|
# config:
|
||||||
# example_option: 'things'
|
# example_option: 'things'
|
||||||
|
|
||||||
|
|
||||||
|
## Opentracing ##
|
||||||
|
|
||||||
|
# These settings enable opentracing, which implements distributed tracing.
|
||||||
|
# This allows you to observe the causal chains of events across servers
|
||||||
|
# including requests, key lookups etc., across any server running
|
||||||
|
# synapse or any other service which supports opentracing
|
||||||
|
# (specifically those implemented with Jaeger).
|
||||||
|
#
|
||||||
|
opentracing:
|
||||||
|
# tracing is disabled by default. Uncomment the following line to enable it.
|
||||||
|
#
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# The list of homeservers to which we wish to send and receive span contexts and span baggage.
|
||||||
|
# See docs/opentracing.rst
|
||||||
|
# This is a list of regexes which are matched against the server_name of the
|
||||||
|
# homeserver.
|
||||||
|
#
|
||||||
|
# By default, it is empty, so no servers are matched.
|
||||||
|
#
|
||||||
|
#homeserver_whitelist:
|
||||||
|
# - ".*"
|
||||||
|
|
||||||
|
# Jaeger can be configured to sample traces at different rates.
|
||||||
|
# All configuration options provided by Jaeger can be set here.
|
||||||
|
# Jaeger's configuration is mostly related to trace sampling, which
|
||||||
|
# is documented here:
|
||||||
|
# https://www.jaegertracing.io/docs/1.13/sampling/.
|
||||||
|
#
|
||||||
|
#jaeger_config:
|
||||||
|
# sampler:
|
||||||
|
# type: const
|
||||||
|
# param: 1
|
||||||
|
|
||||||
|
# Logging whether spans were started and reported
|
||||||
|
#
|
||||||
|
# logging:
|
||||||
|
# false
|
||||||
|
|||||||
@@ -206,6 +206,13 @@ Handles the media repository. It can handle all endpoints starting with::
|
|||||||
|
|
||||||
/_matrix/media/
|
/_matrix/media/
|
||||||
|
|
||||||
|
And the following regular expressions matching media-specific administration
|
||||||
|
APIs::
|
||||||
|
|
||||||
|
^/_synapse/admin/v1/purge_media_cache$
|
||||||
|
^/_synapse/admin/v1/room/.*/media$
|
||||||
|
^/_synapse/admin/v1/quarantine_media/.*$
|
||||||
|
|
||||||
You should also set ``enable_media_repo: False`` in the shared configuration
|
You should also set ``enable_media_repo: False`` in the shared configuration
|
||||||
file to stop the main synapse running background jobs related to managing the
|
file to stop the main synapse running background jobs related to managing the
|
||||||
media repository.
|
media repository.
|
||||||
|
|||||||
@@ -14,6 +14,11 @@
|
|||||||
name = "Bugfixes"
|
name = "Bugfixes"
|
||||||
showcontent = true
|
showcontent = true
|
||||||
|
|
||||||
|
[[tool.towncrier.type]]
|
||||||
|
directory = "docker"
|
||||||
|
name = "Updates to the Docker image"
|
||||||
|
showcontent = true
|
||||||
|
|
||||||
[[tool.towncrier.type]]
|
[[tool.towncrier.type]]
|
||||||
directory = "doc"
|
directory = "doc"
|
||||||
name = "Improved Documentation"
|
name = "Improved Documentation"
|
||||||
@@ -39,6 +44,8 @@ exclude = '''
|
|||||||
| \.git # root of the project
|
| \.git # root of the project
|
||||||
| \.tox
|
| \.tox
|
||||||
| \.venv
|
| \.venv
|
||||||
|
| \.env
|
||||||
|
| env
|
||||||
| _build
|
| _build
|
||||||
| _trial_temp.*
|
| _trial_temp.*
|
||||||
| build
|
| build
|
||||||
|
|||||||
Executable
+12
@@ -0,0 +1,12 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Runs linting scripts over the local Synapse checkout
|
||||||
|
# isort - sorts import statements
|
||||||
|
# flake8 - lints and finds mistakes
|
||||||
|
# black - opinionated code formatter
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
isort -y -rc synapse tests scripts-dev scripts
|
||||||
|
flake8 synapse tests
|
||||||
|
python3 -m black synapse tests scripts-dev scripts
|
||||||
+1
-1
@@ -35,4 +35,4 @@ try:
|
|||||||
except ImportError:
|
except ImportError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
__version__ = "1.1.0rc1"
|
__version__ = "1.3.2-alpha.4+modular"
|
||||||
|
|||||||
+76
-150
@@ -25,7 +25,13 @@ from twisted.internet import defer
|
|||||||
import synapse.types
|
import synapse.types
|
||||||
from synapse import event_auth
|
from synapse import event_auth
|
||||||
from synapse.api.constants import EventTypes, JoinRules, Membership
|
from synapse.api.constants import EventTypes, JoinRules, Membership
|
||||||
from synapse.api.errors import AuthError, Codes, ResourceLimitError
|
from synapse.api.errors import (
|
||||||
|
AuthError,
|
||||||
|
Codes,
|
||||||
|
InvalidClientTokenError,
|
||||||
|
MissingClientTokenError,
|
||||||
|
ResourceLimitError,
|
||||||
|
)
|
||||||
from synapse.config.server import is_threepid_reserved
|
from synapse.config.server import is_threepid_reserved
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
|
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
|
||||||
@@ -63,7 +69,6 @@ class Auth(object):
|
|||||||
self.clock = hs.get_clock()
|
self.clock = hs.get_clock()
|
||||||
self.store = hs.get_datastore()
|
self.store = hs.get_datastore()
|
||||||
self.state = hs.get_state_handler()
|
self.state = hs.get_state_handler()
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
|
|
||||||
|
|
||||||
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
|
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
|
||||||
register_cache("cache", "token_cache", self.token_cache)
|
register_cache("cache", "token_cache", self.token_cache)
|
||||||
@@ -123,7 +128,7 @@ class Auth(object):
|
|||||||
)
|
)
|
||||||
|
|
||||||
self._check_joined_room(member, user_id, room_id)
|
self._check_joined_room(member, user_id, room_id)
|
||||||
defer.returnValue(member)
|
return member
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def check_user_was_in_room(self, room_id, user_id):
|
def check_user_was_in_room(self, room_id, user_id):
|
||||||
@@ -151,13 +156,13 @@ class Auth(object):
|
|||||||
if forgot:
|
if forgot:
|
||||||
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
|
||||||
|
|
||||||
defer.returnValue(member)
|
return member
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def check_host_in_room(self, room_id, host):
|
def check_host_in_room(self, room_id, host):
|
||||||
with Measure(self.clock, "check_host_in_room"):
|
with Measure(self.clock, "check_host_in_room"):
|
||||||
latest_event_ids = yield self.store.is_host_joined(room_id, host)
|
latest_event_ids = yield self.store.is_host_joined(room_id, host)
|
||||||
defer.returnValue(latest_event_ids)
|
return latest_event_ids
|
||||||
|
|
||||||
def _check_joined_room(self, member, user_id, room_id):
|
def _check_joined_room(self, member, user_id, room_id):
|
||||||
if not member or member.membership != Membership.JOIN:
|
if not member or member.membership != Membership.JOIN:
|
||||||
@@ -189,18 +194,17 @@ class Auth(object):
|
|||||||
Returns:
|
Returns:
|
||||||
defer.Deferred: resolves to a ``synapse.types.Requester`` object
|
defer.Deferred: resolves to a ``synapse.types.Requester`` object
|
||||||
Raises:
|
Raises:
|
||||||
AuthError if no user by that token exists or the token is invalid.
|
InvalidClientCredentialsError if no user by that token exists or the token
|
||||||
|
is invalid.
|
||||||
|
AuthError if access is denied for the user in the access token
|
||||||
"""
|
"""
|
||||||
# Can optionally look elsewhere in the request (e.g. headers)
|
|
||||||
try:
|
try:
|
||||||
ip_addr = self.hs.get_ip_from_request(request)
|
ip_addr = self.hs.get_ip_from_request(request)
|
||||||
user_agent = request.requestHeaders.getRawHeaders(
|
user_agent = request.requestHeaders.getRawHeaders(
|
||||||
b"User-Agent", default=[b""]
|
b"User-Agent", default=[b""]
|
||||||
)[0].decode("ascii", "surrogateescape")
|
)[0].decode("ascii", "surrogateescape")
|
||||||
|
|
||||||
access_token = self.get_access_token_from_request(
|
access_token = self.get_access_token_from_request(request)
|
||||||
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
|
|
||||||
)
|
|
||||||
|
|
||||||
user_id, app_service = yield self._get_appservice_user_id(request)
|
user_id, app_service = yield self._get_appservice_user_id(request)
|
||||||
if user_id:
|
if user_id:
|
||||||
@@ -215,9 +219,7 @@ class Auth(object):
|
|||||||
device_id="dummy-device", # stubbed
|
device_id="dummy-device", # stubbed
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(
|
return synapse.types.create_requester(user_id, app_service=app_service)
|
||||||
synapse.types.create_requester(user_id, app_service=app_service)
|
|
||||||
)
|
|
||||||
|
|
||||||
user_info = yield self.get_user_by_access_token(access_token, rights)
|
user_info = yield self.get_user_by_access_token(access_token, rights)
|
||||||
user = user_info["user"]
|
user = user_info["user"]
|
||||||
@@ -258,45 +260,37 @@ class Auth(object):
|
|||||||
|
|
||||||
request.authenticated_entity = user.to_string()
|
request.authenticated_entity = user.to_string()
|
||||||
|
|
||||||
defer.returnValue(
|
return synapse.types.create_requester(
|
||||||
synapse.types.create_requester(
|
user, token_id, is_guest, device_id, app_service=app_service
|
||||||
user, token_id, is_guest, device_id, app_service=app_service
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
except KeyError:
|
except KeyError:
|
||||||
raise AuthError(
|
raise MissingClientTokenError()
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"Missing access token.",
|
|
||||||
errcode=Codes.MISSING_TOKEN,
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_appservice_user_id(self, request):
|
def _get_appservice_user_id(self, request):
|
||||||
app_service = self.store.get_app_service_by_token(
|
app_service = self.store.get_app_service_by_token(
|
||||||
self.get_access_token_from_request(
|
self.get_access_token_from_request(request)
|
||||||
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
if app_service is None:
|
if app_service is None:
|
||||||
defer.returnValue((None, None))
|
return (None, None)
|
||||||
|
|
||||||
if app_service.ip_range_whitelist:
|
if app_service.ip_range_whitelist:
|
||||||
ip_address = IPAddress(self.hs.get_ip_from_request(request))
|
ip_address = IPAddress(self.hs.get_ip_from_request(request))
|
||||||
if ip_address not in app_service.ip_range_whitelist:
|
if ip_address not in app_service.ip_range_whitelist:
|
||||||
defer.returnValue((None, None))
|
return (None, None)
|
||||||
|
|
||||||
if b"user_id" not in request.args:
|
if b"user_id" not in request.args:
|
||||||
defer.returnValue((app_service.sender, app_service))
|
return (app_service.sender, app_service)
|
||||||
|
|
||||||
user_id = request.args[b"user_id"][0].decode("utf8")
|
user_id = request.args[b"user_id"][0].decode("utf8")
|
||||||
if app_service.sender == user_id:
|
if app_service.sender == user_id:
|
||||||
defer.returnValue((app_service.sender, app_service))
|
return (app_service.sender, app_service)
|
||||||
|
|
||||||
if not app_service.is_interested_in_user(user_id):
|
if not app_service.is_interested_in_user(user_id):
|
||||||
raise AuthError(403, "Application service cannot masquerade as this user.")
|
raise AuthError(403, "Application service cannot masquerade as this user.")
|
||||||
if not (yield self.store.get_user_by_id(user_id)):
|
if not (yield self.store.get_user_by_id(user_id)):
|
||||||
raise AuthError(403, "Application service has not registered this user")
|
raise AuthError(403, "Application service has not registered this user")
|
||||||
defer.returnValue((user_id, app_service))
|
return (user_id, app_service)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_user_by_access_token(self, token, rights="access"):
|
def get_user_by_access_token(self, token, rights="access"):
|
||||||
@@ -313,14 +307,26 @@ class Auth(object):
|
|||||||
`token_id` (int|None): access token id. May be None if guest
|
`token_id` (int|None): access token id. May be None if guest
|
||||||
`device_id` (str|None): device corresponding to access token
|
`device_id` (str|None): device corresponding to access token
|
||||||
Raises:
|
Raises:
|
||||||
AuthError if no user by that token exists or the token is invalid.
|
InvalidClientCredentialsError if no user by that token exists or the token
|
||||||
|
is invalid.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if rights == "access":
|
if rights == "access":
|
||||||
# first look in the database
|
# first look in the database
|
||||||
r = yield self._look_up_user_by_access_token(token)
|
r = yield self._look_up_user_by_access_token(token)
|
||||||
if r:
|
if r:
|
||||||
defer.returnValue(r)
|
valid_until_ms = r["valid_until_ms"]
|
||||||
|
if (
|
||||||
|
valid_until_ms is not None
|
||||||
|
and valid_until_ms < self.clock.time_msec()
|
||||||
|
):
|
||||||
|
# there was a valid access token, but it has expired.
|
||||||
|
# soft-logout the user.
|
||||||
|
raise InvalidClientTokenError(
|
||||||
|
msg="Access token has expired", soft_logout=True
|
||||||
|
)
|
||||||
|
|
||||||
|
return r
|
||||||
|
|
||||||
# otherwise it needs to be a valid macaroon
|
# otherwise it needs to be a valid macaroon
|
||||||
try:
|
try:
|
||||||
@@ -331,11 +337,7 @@ class Auth(object):
|
|||||||
if not guest:
|
if not guest:
|
||||||
# non-guest access tokens must be in the database
|
# non-guest access tokens must be in the database
|
||||||
logger.warning("Unrecognised access token - not in store.")
|
logger.warning("Unrecognised access token - not in store.")
|
||||||
raise AuthError(
|
raise InvalidClientTokenError()
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"Unrecognised access token.",
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Guest access tokens are not stored in the database (there can
|
# Guest access tokens are not stored in the database (there can
|
||||||
# only be one access token per guest, anyway).
|
# only be one access token per guest, anyway).
|
||||||
@@ -350,16 +352,10 @@ class Auth(object):
|
|||||||
# guest tokens.
|
# guest tokens.
|
||||||
stored_user = yield self.store.get_user_by_id(user_id)
|
stored_user = yield self.store.get_user_by_id(user_id)
|
||||||
if not stored_user:
|
if not stored_user:
|
||||||
raise AuthError(
|
raise InvalidClientTokenError("Unknown user_id %s" % user_id)
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"Unknown user_id %s" % user_id,
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
|
||||||
if not stored_user["is_guest"]:
|
if not stored_user["is_guest"]:
|
||||||
raise AuthError(
|
raise InvalidClientTokenError(
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
"Guest access token used for regular user"
|
||||||
"Guest access token used for regular user",
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
)
|
||||||
ret = {
|
ret = {
|
||||||
"user": user,
|
"user": user,
|
||||||
@@ -378,7 +374,7 @@ class Auth(object):
|
|||||||
}
|
}
|
||||||
else:
|
else:
|
||||||
raise RuntimeError("Unknown rights setting %s", rights)
|
raise RuntimeError("Unknown rights setting %s", rights)
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
except (
|
except (
|
||||||
_InvalidMacaroonException,
|
_InvalidMacaroonException,
|
||||||
pymacaroons.exceptions.MacaroonException,
|
pymacaroons.exceptions.MacaroonException,
|
||||||
@@ -386,11 +382,7 @@ class Auth(object):
|
|||||||
ValueError,
|
ValueError,
|
||||||
) as e:
|
) as e:
|
||||||
logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
|
logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
|
||||||
raise AuthError(
|
raise InvalidClientTokenError("Invalid macaroon passed.")
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"Invalid macaroon passed.",
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
|
||||||
|
|
||||||
def _parse_and_validate_macaroon(self, token, rights="access"):
|
def _parse_and_validate_macaroon(self, token, rights="access"):
|
||||||
"""Takes a macaroon and tries to parse and validate it. This is cached
|
"""Takes a macaroon and tries to parse and validate it. This is cached
|
||||||
@@ -418,25 +410,16 @@ class Auth(object):
|
|||||||
try:
|
try:
|
||||||
user_id = self.get_user_id_from_macaroon(macaroon)
|
user_id = self.get_user_id_from_macaroon(macaroon)
|
||||||
|
|
||||||
has_expiry = False
|
|
||||||
guest = False
|
guest = False
|
||||||
for caveat in macaroon.caveats:
|
for caveat in macaroon.caveats:
|
||||||
if caveat.caveat_id.startswith("time "):
|
if caveat.caveat_id == "guest = true":
|
||||||
has_expiry = True
|
|
||||||
elif caveat.caveat_id == "guest = true":
|
|
||||||
guest = True
|
guest = True
|
||||||
|
|
||||||
self.validate_macaroon(
|
self.validate_macaroon(macaroon, rights, user_id=user_id)
|
||||||
macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
|
|
||||||
)
|
|
||||||
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
|
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
|
||||||
raise AuthError(
|
raise InvalidClientTokenError("Invalid macaroon passed.")
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"Invalid macaroon passed.",
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
|
||||||
|
|
||||||
if not has_expiry and rights == "access":
|
if rights == "access":
|
||||||
self.token_cache[token] = (user_id, guest)
|
self.token_cache[token] = (user_id, guest)
|
||||||
|
|
||||||
return user_id, guest
|
return user_id, guest
|
||||||
@@ -453,19 +436,16 @@ class Auth(object):
|
|||||||
(str) user id
|
(str) user id
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
AuthError if there is no user_id caveat in the macaroon
|
InvalidClientCredentialsError if there is no user_id caveat in the
|
||||||
|
macaroon
|
||||||
"""
|
"""
|
||||||
user_prefix = "user_id = "
|
user_prefix = "user_id = "
|
||||||
for caveat in macaroon.caveats:
|
for caveat in macaroon.caveats:
|
||||||
if caveat.caveat_id.startswith(user_prefix):
|
if caveat.caveat_id.startswith(user_prefix):
|
||||||
return caveat.caveat_id[len(user_prefix) :]
|
return caveat.caveat_id[len(user_prefix) :]
|
||||||
raise AuthError(
|
raise InvalidClientTokenError("No user caveat in macaroon")
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"No user caveat in macaroon",
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
|
||||||
|
|
||||||
def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
|
def validate_macaroon(self, macaroon, type_string, user_id):
|
||||||
"""
|
"""
|
||||||
validate that a Macaroon is understood by and was signed by this server.
|
validate that a Macaroon is understood by and was signed by this server.
|
||||||
|
|
||||||
@@ -473,7 +453,6 @@ class Auth(object):
|
|||||||
macaroon(pymacaroons.Macaroon): The macaroon to validate
|
macaroon(pymacaroons.Macaroon): The macaroon to validate
|
||||||
type_string(str): The kind of token required (e.g. "access",
|
type_string(str): The kind of token required (e.g. "access",
|
||||||
"delete_pusher")
|
"delete_pusher")
|
||||||
verify_expiry(bool): Whether to verify whether the macaroon has expired.
|
|
||||||
user_id (str): The user_id required
|
user_id (str): The user_id required
|
||||||
"""
|
"""
|
||||||
v = pymacaroons.Verifier()
|
v = pymacaroons.Verifier()
|
||||||
@@ -486,19 +465,7 @@ class Auth(object):
|
|||||||
v.satisfy_exact("type = " + type_string)
|
v.satisfy_exact("type = " + type_string)
|
||||||
v.satisfy_exact("user_id = %s" % user_id)
|
v.satisfy_exact("user_id = %s" % user_id)
|
||||||
v.satisfy_exact("guest = true")
|
v.satisfy_exact("guest = true")
|
||||||
|
v.satisfy_general(self._verify_expiry)
|
||||||
# verify_expiry should really always be True, but there exist access
|
|
||||||
# tokens in the wild which expire when they should not, so we can't
|
|
||||||
# enforce expiry yet (so we have to allow any caveat starting with
|
|
||||||
# 'time < ' in access tokens).
|
|
||||||
#
|
|
||||||
# On the other hand, short-term login tokens (as used by CAS login, for
|
|
||||||
# example) have an expiry time which we do want to enforce.
|
|
||||||
|
|
||||||
if verify_expiry:
|
|
||||||
v.satisfy_general(self._verify_expiry)
|
|
||||||
else:
|
|
||||||
v.satisfy_general(lambda c: c.startswith("time < "))
|
|
||||||
|
|
||||||
# access_tokens include a nonce for uniqueness: any value is acceptable
|
# access_tokens include a nonce for uniqueness: any value is acceptable
|
||||||
v.satisfy_general(lambda c: c.startswith("nonce = "))
|
v.satisfy_general(lambda c: c.startswith("nonce = "))
|
||||||
@@ -517,7 +484,7 @@ class Auth(object):
|
|||||||
def _look_up_user_by_access_token(self, token):
|
def _look_up_user_by_access_token(self, token):
|
||||||
ret = yield self.store.get_user_by_access_token(token)
|
ret = yield self.store.get_user_by_access_token(token)
|
||||||
if not ret:
|
if not ret:
|
||||||
defer.returnValue(None)
|
return None
|
||||||
|
|
||||||
# we use ret.get() below because *lots* of unit tests stub out
|
# we use ret.get() below because *lots* of unit tests stub out
|
||||||
# get_user_by_access_token in a way where it only returns a couple of
|
# get_user_by_access_token in a way where it only returns a couple of
|
||||||
@@ -527,26 +494,18 @@ class Auth(object):
|
|||||||
"token_id": ret.get("token_id", None),
|
"token_id": ret.get("token_id", None),
|
||||||
"is_guest": False,
|
"is_guest": False,
|
||||||
"device_id": ret.get("device_id"),
|
"device_id": ret.get("device_id"),
|
||||||
|
"valid_until_ms": ret.get("valid_until_ms"),
|
||||||
}
|
}
|
||||||
defer.returnValue(user_info)
|
return user_info
|
||||||
|
|
||||||
def get_appservice_by_req(self, request):
|
def get_appservice_by_req(self, request):
|
||||||
try:
|
token = self.get_access_token_from_request(request)
|
||||||
token = self.get_access_token_from_request(
|
service = self.store.get_app_service_by_token(token)
|
||||||
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
|
if not service:
|
||||||
)
|
logger.warn("Unrecognised appservice access token.")
|
||||||
service = self.store.get_app_service_by_token(token)
|
raise InvalidClientTokenError()
|
||||||
if not service:
|
request.authenticated_entity = service.sender
|
||||||
logger.warn("Unrecognised appservice access token.")
|
return defer.succeed(service)
|
||||||
raise AuthError(
|
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
|
||||||
"Unrecognised access token.",
|
|
||||||
errcode=Codes.UNKNOWN_TOKEN,
|
|
||||||
)
|
|
||||||
request.authenticated_entity = service.sender
|
|
||||||
return defer.succeed(service)
|
|
||||||
except KeyError:
|
|
||||||
raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.")
|
|
||||||
|
|
||||||
def is_server_admin(self, user):
|
def is_server_admin(self, user):
|
||||||
""" Check if the given user is a local server admin.
|
""" Check if the given user is a local server admin.
|
||||||
@@ -562,7 +521,7 @@ class Auth(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def compute_auth_events(self, event, current_state_ids, for_verification=False):
|
def compute_auth_events(self, event, current_state_ids, for_verification=False):
|
||||||
if event.type == EventTypes.Create:
|
if event.type == EventTypes.Create:
|
||||||
defer.returnValue([])
|
return []
|
||||||
|
|
||||||
auth_ids = []
|
auth_ids = []
|
||||||
|
|
||||||
@@ -623,22 +582,7 @@ class Auth(object):
|
|||||||
if member_event.content["membership"] == Membership.JOIN:
|
if member_event.content["membership"] == Membership.JOIN:
|
||||||
auth_ids.append(member_event.event_id)
|
auth_ids.append(member_event.event_id)
|
||||||
|
|
||||||
defer.returnValue(auth_ids)
|
return auth_ids
|
||||||
|
|
||||||
def check_redaction(self, room_version, event, auth_events):
|
|
||||||
"""Check whether the event sender is allowed to redact the target event.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if the the sender is allowed to redact the target event if the
|
|
||||||
target event was created by them.
|
|
||||||
False if the sender is allowed to redact the target event with no
|
|
||||||
further checks.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
AuthError if the event sender is definitely not allowed to redact
|
|
||||||
the target event.
|
|
||||||
"""
|
|
||||||
return event_auth.check_redaction(room_version, event, auth_events)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def check_can_change_room_list(self, room_id, user):
|
def check_can_change_room_list(self, room_id, user):
|
||||||
@@ -652,7 +596,7 @@ class Auth(object):
|
|||||||
|
|
||||||
is_admin = yield self.is_server_admin(user)
|
is_admin = yield self.is_server_admin(user)
|
||||||
if is_admin:
|
if is_admin:
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
user_id = user.to_string()
|
user_id = user.to_string()
|
||||||
yield self.check_joined_room(room_id, user_id)
|
yield self.check_joined_room(room_id, user_id)
|
||||||
@@ -692,20 +636,16 @@ class Auth(object):
|
|||||||
return bool(query_params) or bool(auth_headers)
|
return bool(query_params) or bool(auth_headers)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_access_token_from_request(request, token_not_found_http_status=401):
|
def get_access_token_from_request(request):
|
||||||
"""Extracts the access_token from the request.
|
"""Extracts the access_token from the request.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
request: The http request.
|
request: The http request.
|
||||||
token_not_found_http_status(int): The HTTP status code to set in the
|
|
||||||
AuthError if the token isn't found. This is used in some of the
|
|
||||||
legacy APIs to change the status code to 403 from the default of
|
|
||||||
401 since some of the old clients depended on auth errors returning
|
|
||||||
403.
|
|
||||||
Returns:
|
Returns:
|
||||||
unicode: The access_token
|
unicode: The access_token
|
||||||
Raises:
|
Raises:
|
||||||
AuthError: If there isn't an access_token in the request.
|
MissingClientTokenError: If there isn't a single access_token in the
|
||||||
|
request
|
||||||
"""
|
"""
|
||||||
|
|
||||||
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
|
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
|
||||||
@@ -714,34 +654,20 @@ class Auth(object):
|
|||||||
# Try the get the access_token from a "Authorization: Bearer"
|
# Try the get the access_token from a "Authorization: Bearer"
|
||||||
# header
|
# header
|
||||||
if query_params is not None:
|
if query_params is not None:
|
||||||
raise AuthError(
|
raise MissingClientTokenError(
|
||||||
token_not_found_http_status,
|
"Mixing Authorization headers and access_token query parameters."
|
||||||
"Mixing Authorization headers and access_token query parameters.",
|
|
||||||
errcode=Codes.MISSING_TOKEN,
|
|
||||||
)
|
)
|
||||||
if len(auth_headers) > 1:
|
if len(auth_headers) > 1:
|
||||||
raise AuthError(
|
raise MissingClientTokenError("Too many Authorization headers.")
|
||||||
token_not_found_http_status,
|
|
||||||
"Too many Authorization headers.",
|
|
||||||
errcode=Codes.MISSING_TOKEN,
|
|
||||||
)
|
|
||||||
parts = auth_headers[0].split(b" ")
|
parts = auth_headers[0].split(b" ")
|
||||||
if parts[0] == b"Bearer" and len(parts) == 2:
|
if parts[0] == b"Bearer" and len(parts) == 2:
|
||||||
return parts[1].decode("ascii")
|
return parts[1].decode("ascii")
|
||||||
else:
|
else:
|
||||||
raise AuthError(
|
raise MissingClientTokenError("Invalid Authorization header.")
|
||||||
token_not_found_http_status,
|
|
||||||
"Invalid Authorization header.",
|
|
||||||
errcode=Codes.MISSING_TOKEN,
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
# Try to get the access_token from the query params.
|
# Try to get the access_token from the query params.
|
||||||
if not query_params:
|
if not query_params:
|
||||||
raise AuthError(
|
raise MissingClientTokenError()
|
||||||
token_not_found_http_status,
|
|
||||||
"Missing access token.",
|
|
||||||
errcode=Codes.MISSING_TOKEN,
|
|
||||||
)
|
|
||||||
|
|
||||||
return query_params[0].decode("ascii")
|
return query_params[0].decode("ascii")
|
||||||
|
|
||||||
@@ -764,7 +690,7 @@ class Auth(object):
|
|||||||
# * The user is a guest user, and has joined the room
|
# * The user is a guest user, and has joined the room
|
||||||
# else it will throw.
|
# else it will throw.
|
||||||
member_event = yield self.check_user_was_in_room(room_id, user_id)
|
member_event = yield self.check_user_was_in_room(room_id, user_id)
|
||||||
defer.returnValue((member_event.membership, member_event.event_id))
|
return (member_event.membership, member_event.event_id)
|
||||||
except AuthError:
|
except AuthError:
|
||||||
visibility = yield self.state.get_current_state(
|
visibility = yield self.state.get_current_state(
|
||||||
room_id, EventTypes.RoomHistoryVisibility, ""
|
room_id, EventTypes.RoomHistoryVisibility, ""
|
||||||
@@ -773,7 +699,7 @@ class Auth(object):
|
|||||||
visibility
|
visibility
|
||||||
and visibility.content["history_visibility"] == "world_readable"
|
and visibility.content["history_visibility"] == "world_readable"
|
||||||
):
|
):
|
||||||
defer.returnValue((Membership.JOIN, None))
|
return (Membership.JOIN, None)
|
||||||
return
|
return
|
||||||
raise AuthError(
|
raise AuthError(
|
||||||
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
|
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
|
||||||
|
|||||||
+55
-1
@@ -61,6 +61,7 @@ class Codes(object):
|
|||||||
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
|
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
|
||||||
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
|
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
|
||||||
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
|
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
|
||||||
|
USER_DEACTIVATED = "M_USER_DEACTIVATED"
|
||||||
|
|
||||||
|
|
||||||
class CodeMessageException(RuntimeError):
|
class CodeMessageException(RuntimeError):
|
||||||
@@ -139,6 +140,22 @@ class ConsentNotGivenError(SynapseError):
|
|||||||
return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
|
return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
|
||||||
|
|
||||||
|
|
||||||
|
class UserDeactivatedError(SynapseError):
|
||||||
|
"""The error returned to the client when the user attempted to access an
|
||||||
|
authenticated endpoint, but the account has been deactivated.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, msg):
|
||||||
|
"""Constructs a UserDeactivatedError
|
||||||
|
|
||||||
|
Args:
|
||||||
|
msg (str): The human-readable error message
|
||||||
|
"""
|
||||||
|
super(UserDeactivatedError, self).__init__(
|
||||||
|
code=http_client.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class RegistrationError(SynapseError):
|
class RegistrationError(SynapseError):
|
||||||
"""An error raised when a registration event fails."""
|
"""An error raised when a registration event fails."""
|
||||||
|
|
||||||
@@ -210,7 +227,9 @@ class NotFoundError(SynapseError):
|
|||||||
|
|
||||||
|
|
||||||
class AuthError(SynapseError):
|
class AuthError(SynapseError):
|
||||||
"""An error raised when there was a problem authorising an event."""
|
"""An error raised when there was a problem authorising an event, and at various
|
||||||
|
other poorly-defined times.
|
||||||
|
"""
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
if "errcode" not in kwargs:
|
if "errcode" not in kwargs:
|
||||||
@@ -218,6 +237,41 @@ class AuthError(SynapseError):
|
|||||||
super(AuthError, self).__init__(*args, **kwargs)
|
super(AuthError, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidClientCredentialsError(SynapseError):
|
||||||
|
"""An error raised when there was a problem with the authorisation credentials
|
||||||
|
in a client request.
|
||||||
|
|
||||||
|
https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens:
|
||||||
|
|
||||||
|
When credentials are required but missing or invalid, the HTTP call will
|
||||||
|
return with a status of 401 and the error code, M_MISSING_TOKEN or
|
||||||
|
M_UNKNOWN_TOKEN respectively.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, msg, errcode):
|
||||||
|
super().__init__(code=401, msg=msg, errcode=errcode)
|
||||||
|
|
||||||
|
|
||||||
|
class MissingClientTokenError(InvalidClientCredentialsError):
|
||||||
|
"""Raised when we couldn't find the access token in a request"""
|
||||||
|
|
||||||
|
def __init__(self, msg="Missing access token"):
|
||||||
|
super().__init__(msg=msg, errcode="M_MISSING_TOKEN")
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidClientTokenError(InvalidClientCredentialsError):
|
||||||
|
"""Raised when we didn't understand the access token in a request"""
|
||||||
|
|
||||||
|
def __init__(self, msg="Unrecognised access token", soft_logout=False):
|
||||||
|
super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
|
||||||
|
self._soft_logout = soft_logout
|
||||||
|
|
||||||
|
def error_dict(self):
|
||||||
|
d = super().error_dict()
|
||||||
|
d["soft_logout"] = self._soft_logout
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
class ResourceLimitError(SynapseError):
|
class ResourceLimitError(SynapseError):
|
||||||
"""
|
"""
|
||||||
Any error raised when there is a problem with resource usage.
|
Any error raised when there is a problem with resource usage.
|
||||||
|
|||||||
@@ -132,7 +132,7 @@ class Filtering(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_user_filter(self, user_localpart, filter_id):
|
def get_user_filter(self, user_localpart, filter_id):
|
||||||
result = yield self.store.get_user_filter(user_localpart, filter_id)
|
result = yield self.store.get_user_filter(user_localpart, filter_id)
|
||||||
defer.returnValue(FilterCollection(result))
|
return FilterCollection(result)
|
||||||
|
|
||||||
def add_user_filter(self, user_localpart, user_filter):
|
def add_user_filter(self, user_localpart, user_filter):
|
||||||
self.check_valid_filter(user_filter)
|
self.check_valid_filter(user_filter)
|
||||||
|
|||||||
+101
-30
@@ -15,7 +15,9 @@
|
|||||||
|
|
||||||
import gc
|
import gc
|
||||||
import logging
|
import logging
|
||||||
|
import os
|
||||||
import signal
|
import signal
|
||||||
|
import socket
|
||||||
import sys
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
@@ -27,7 +29,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory
|
|||||||
import synapse
|
import synapse
|
||||||
from synapse.app import check_bind_error
|
from synapse.app import check_bind_error
|
||||||
from synapse.crypto import context_factory
|
from synapse.crypto import context_factory
|
||||||
from synapse.util import PreserveLoggingContext
|
from synapse.logging.context import PreserveLoggingContext
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.util.rlimit import change_resource_limit
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
@@ -48,7 +50,7 @@ def register_sighup(func):
|
|||||||
_sighup_callbacks.append(func)
|
_sighup_callbacks.append(func)
|
||||||
|
|
||||||
|
|
||||||
def start_worker_reactor(appname, config):
|
def start_worker_reactor(appname, config, run_command=reactor.run):
|
||||||
""" Run the reactor in the main process
|
""" Run the reactor in the main process
|
||||||
|
|
||||||
Daemonizes if necessary, and then configures some resources, before starting
|
Daemonizes if necessary, and then configures some resources, before starting
|
||||||
@@ -57,6 +59,7 @@ def start_worker_reactor(appname, config):
|
|||||||
Args:
|
Args:
|
||||||
appname (str): application name which will be sent to syslog
|
appname (str): application name which will be sent to syslog
|
||||||
config (synapse.config.Config): config object
|
config (synapse.config.Config): config object
|
||||||
|
run_command (Callable[]): callable that actually runs the reactor
|
||||||
"""
|
"""
|
||||||
|
|
||||||
logger = logging.getLogger(config.worker_app)
|
logger = logging.getLogger(config.worker_app)
|
||||||
@@ -69,11 +72,19 @@ def start_worker_reactor(appname, config):
|
|||||||
daemonize=config.worker_daemonize,
|
daemonize=config.worker_daemonize,
|
||||||
print_pidfile=config.print_pidfile,
|
print_pidfile=config.print_pidfile,
|
||||||
logger=logger,
|
logger=logger,
|
||||||
|
run_command=run_command,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def start_reactor(
|
def start_reactor(
|
||||||
appname, soft_file_limit, gc_thresholds, pid_file, daemonize, print_pidfile, logger
|
appname,
|
||||||
|
soft_file_limit,
|
||||||
|
gc_thresholds,
|
||||||
|
pid_file,
|
||||||
|
daemonize,
|
||||||
|
print_pidfile,
|
||||||
|
logger,
|
||||||
|
run_command=reactor.run,
|
||||||
):
|
):
|
||||||
""" Run the reactor in the main process
|
""" Run the reactor in the main process
|
||||||
|
|
||||||
@@ -88,38 +99,42 @@ def start_reactor(
|
|||||||
daemonize (bool): true to run the reactor in a background process
|
daemonize (bool): true to run the reactor in a background process
|
||||||
print_pidfile (bool): whether to print the pid file, if daemonize is True
|
print_pidfile (bool): whether to print the pid file, if daemonize is True
|
||||||
logger (logging.Logger): logger instance to pass to Daemonize
|
logger (logging.Logger): logger instance to pass to Daemonize
|
||||||
|
run_command (Callable[]): callable that actually runs the reactor
|
||||||
"""
|
"""
|
||||||
|
|
||||||
install_dns_limiter(reactor)
|
install_dns_limiter(reactor)
|
||||||
|
|
||||||
def run():
|
def run():
|
||||||
# make sure that we run the reactor with the sentinel log context,
|
logger.info("Running")
|
||||||
# otherwise other PreserveLoggingContext instances will get confused
|
change_resource_limit(soft_file_limit)
|
||||||
# and complain when they see the logcontext arbitrarily swapping
|
if gc_thresholds:
|
||||||
# between the sentinel and `run` logcontexts.
|
gc.set_threshold(*gc_thresholds)
|
||||||
with PreserveLoggingContext():
|
run_command()
|
||||||
logger.info("Running")
|
|
||||||
|
|
||||||
change_resource_limit(soft_file_limit)
|
# make sure that we run the reactor with the sentinel log context,
|
||||||
if gc_thresholds:
|
# otherwise other PreserveLoggingContext instances will get confused
|
||||||
gc.set_threshold(*gc_thresholds)
|
# and complain when they see the logcontext arbitrarily swapping
|
||||||
reactor.run()
|
# between the sentinel and `run` logcontexts.
|
||||||
|
#
|
||||||
|
# We also need to drop the logcontext before forking if we're daemonizing,
|
||||||
|
# otherwise the cputime metrics get confused about the per-thread resource usage
|
||||||
|
# appearing to go backwards.
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
if daemonize:
|
||||||
|
if print_pidfile:
|
||||||
|
print(pid_file)
|
||||||
|
|
||||||
if daemonize:
|
daemon = Daemonize(
|
||||||
if print_pidfile:
|
app=appname,
|
||||||
print(pid_file)
|
pid=pid_file,
|
||||||
|
action=run,
|
||||||
daemon = Daemonize(
|
auto_close_fds=False,
|
||||||
app=appname,
|
verbose=True,
|
||||||
pid=pid_file,
|
logger=logger,
|
||||||
action=run,
|
)
|
||||||
auto_close_fds=False,
|
daemon.start()
|
||||||
verbose=True,
|
else:
|
||||||
logger=logger,
|
run()
|
||||||
)
|
|
||||||
daemon.start()
|
|
||||||
else:
|
|
||||||
run()
|
|
||||||
|
|
||||||
|
|
||||||
def quit_with_error(error_string):
|
def quit_with_error(error_string):
|
||||||
@@ -136,8 +151,7 @@ def listen_metrics(bind_addresses, port):
|
|||||||
"""
|
"""
|
||||||
Start Prometheus metrics server.
|
Start Prometheus metrics server.
|
||||||
"""
|
"""
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy, start_http_server
|
||||||
from prometheus_client import start_http_server
|
|
||||||
|
|
||||||
for host in bind_addresses:
|
for host in bind_addresses:
|
||||||
logger.info("Starting metrics listener on %s:%d", host, port)
|
logger.info("Starting metrics listener on %s:%d", host, port)
|
||||||
@@ -230,9 +244,15 @@ def start(hs, listeners=None):
|
|||||||
if hasattr(signal, "SIGHUP"):
|
if hasattr(signal, "SIGHUP"):
|
||||||
|
|
||||||
def handle_sighup(*args, **kwargs):
|
def handle_sighup(*args, **kwargs):
|
||||||
|
# Tell systemd our state, if we're using it. This will silently fail if
|
||||||
|
# we're not using systemd.
|
||||||
|
sdnotify(b"RELOADING=1")
|
||||||
|
|
||||||
for i in _sighup_callbacks:
|
for i in _sighup_callbacks:
|
||||||
i(hs)
|
i(hs)
|
||||||
|
|
||||||
|
sdnotify(b"READY=1")
|
||||||
|
|
||||||
signal.signal(signal.SIGHUP, handle_sighup)
|
signal.signal(signal.SIGHUP, handle_sighup)
|
||||||
|
|
||||||
register_sighup(refresh_certificate)
|
register_sighup(refresh_certificate)
|
||||||
@@ -240,11 +260,15 @@ def start(hs, listeners=None):
|
|||||||
# Load the certificate from disk.
|
# Load the certificate from disk.
|
||||||
refresh_certificate(hs)
|
refresh_certificate(hs)
|
||||||
|
|
||||||
|
# Start the tracer
|
||||||
|
synapse.logging.opentracing.init_tracer(hs.config)
|
||||||
|
|
||||||
# It is now safe to start your Synapse.
|
# It is now safe to start your Synapse.
|
||||||
hs.start_listening(listeners)
|
hs.start_listening(listeners)
|
||||||
hs.get_datastore().start_profiling()
|
hs.get_datastore().start_profiling()
|
||||||
|
|
||||||
setup_sentry(hs)
|
setup_sentry(hs)
|
||||||
|
setup_sdnotify(hs)
|
||||||
except Exception:
|
except Exception:
|
||||||
traceback.print_exc(file=sys.stderr)
|
traceback.print_exc(file=sys.stderr)
|
||||||
reactor = hs.get_reactor()
|
reactor = hs.get_reactor()
|
||||||
@@ -277,6 +301,21 @@ def setup_sentry(hs):
|
|||||||
scope.set_tag("worker_name", name)
|
scope.set_tag("worker_name", name)
|
||||||
|
|
||||||
|
|
||||||
|
def setup_sdnotify(hs):
|
||||||
|
"""Adds process state hooks to tell systemd what we are up to.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Tell systemd our state, if we're using it. This will silently fail if
|
||||||
|
# we're not using systemd.
|
||||||
|
hs.get_reactor().addSystemEventTrigger(
|
||||||
|
"after", "startup", sdnotify, b"READY=1\nMAINPID=%i" % (os.getpid(),)
|
||||||
|
)
|
||||||
|
|
||||||
|
hs.get_reactor().addSystemEventTrigger(
|
||||||
|
"before", "shutdown", sdnotify, b"STOPPING=1"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
|
def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
|
||||||
"""Replaces the resolver with one that limits the number of in flight DNS
|
"""Replaces the resolver with one that limits the number of in flight DNS
|
||||||
requests.
|
requests.
|
||||||
@@ -370,3 +409,35 @@ class _DeferredResolutionReceiver(object):
|
|||||||
def resolutionComplete(self):
|
def resolutionComplete(self):
|
||||||
self._deferred.callback(())
|
self._deferred.callback(())
|
||||||
self._receiver.resolutionComplete()
|
self._receiver.resolutionComplete()
|
||||||
|
|
||||||
|
|
||||||
|
sdnotify_sockaddr = os.getenv("NOTIFY_SOCKET")
|
||||||
|
|
||||||
|
|
||||||
|
def sdnotify(state):
|
||||||
|
"""
|
||||||
|
Send a notification to systemd, if the NOTIFY_SOCKET env var is set.
|
||||||
|
|
||||||
|
This function is based on the sdnotify python package, but since it's only a few
|
||||||
|
lines of code, it's easier to duplicate it here than to add a dependency on a
|
||||||
|
package which many OSes don't include as a matter of principle.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
state (bytes): notification to send
|
||||||
|
"""
|
||||||
|
if not isinstance(state, bytes):
|
||||||
|
raise TypeError("sdnotify should be called with a bytes")
|
||||||
|
if not sdnotify_sockaddr:
|
||||||
|
return
|
||||||
|
addr = sdnotify_sockaddr
|
||||||
|
if addr[0] == "@":
|
||||||
|
addr = "\0" + addr[1:]
|
||||||
|
|
||||||
|
try:
|
||||||
|
with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
|
||||||
|
sock.connect(addr)
|
||||||
|
sock.sendall(state)
|
||||||
|
except Exception as e:
|
||||||
|
# this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
|
||||||
|
# unless systemd is expecting us to notify it.
|
||||||
|
logger.warning("Unable to send notification to systemd: %s", e)
|
||||||
|
|||||||
@@ -0,0 +1,264 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2019 Matrix.org Foundation C.I.C.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from canonicaljson import json
|
||||||
|
|
||||||
|
from twisted.internet import defer, task
|
||||||
|
|
||||||
|
import synapse
|
||||||
|
from synapse.app import _base
|
||||||
|
from synapse.config._base import ConfigError
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.config.logger import setup_logging
|
||||||
|
from synapse.handlers.admin import ExfiltrationWriter
|
||||||
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
|
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||||
|
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||||
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
|
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
||||||
|
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
|
||||||
|
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
||||||
|
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||||
|
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||||
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||||
|
from synapse.replication.slave.storage.room import RoomStore
|
||||||
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||||
|
from synapse.server import HomeServer
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
logger = logging.getLogger("synapse.app.admin_cmd")
|
||||||
|
|
||||||
|
|
||||||
|
class AdminCmdSlavedStore(
|
||||||
|
SlavedReceiptsStore,
|
||||||
|
SlavedAccountDataStore,
|
||||||
|
SlavedApplicationServiceStore,
|
||||||
|
SlavedRegistrationStore,
|
||||||
|
SlavedFilteringStore,
|
||||||
|
SlavedPresenceStore,
|
||||||
|
SlavedGroupServerStore,
|
||||||
|
SlavedDeviceInboxStore,
|
||||||
|
SlavedDeviceStore,
|
||||||
|
SlavedPushRuleStore,
|
||||||
|
SlavedEventStore,
|
||||||
|
SlavedClientIpStore,
|
||||||
|
RoomStore,
|
||||||
|
BaseSlavedStore,
|
||||||
|
):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class AdminCmdServer(HomeServer):
|
||||||
|
DATASTORE_CLASS = AdminCmdSlavedStore
|
||||||
|
|
||||||
|
def _listen_http(self, listener_config):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def start_listening(self, listeners):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def build_tcp_replication(self):
|
||||||
|
return AdminCmdReplicationHandler(self)
|
||||||
|
|
||||||
|
|
||||||
|
class AdminCmdReplicationHandler(ReplicationClientHandler):
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def on_rdata(self, stream_name, token, rows):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_streams_to_replicate(self):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def export_data_command(hs, args):
|
||||||
|
"""Export data for a user.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hs (HomeServer)
|
||||||
|
args (argparse.Namespace)
|
||||||
|
"""
|
||||||
|
|
||||||
|
user_id = args.user_id
|
||||||
|
directory = args.output_directory
|
||||||
|
|
||||||
|
res = yield hs.get_handlers().admin_handler.export_user_data(
|
||||||
|
user_id, FileExfiltrationWriter(user_id, directory=directory)
|
||||||
|
)
|
||||||
|
print(res)
|
||||||
|
|
||||||
|
|
||||||
|
class FileExfiltrationWriter(ExfiltrationWriter):
|
||||||
|
"""An ExfiltrationWriter that writes the users data to a directory.
|
||||||
|
Returns the directory location on completion.
|
||||||
|
|
||||||
|
Note: This writes to disk on the main reactor thread.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user_id (str): The user whose data is being exfiltrated.
|
||||||
|
directory (str|None): The directory to write the data to, if None then
|
||||||
|
will write to a temporary directory.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, user_id, directory=None):
|
||||||
|
self.user_id = user_id
|
||||||
|
|
||||||
|
if directory:
|
||||||
|
self.base_directory = directory
|
||||||
|
else:
|
||||||
|
self.base_directory = tempfile.mkdtemp(
|
||||||
|
prefix="synapse-exfiltrate__%s__" % (user_id,)
|
||||||
|
)
|
||||||
|
|
||||||
|
os.makedirs(self.base_directory, exist_ok=True)
|
||||||
|
if list(os.listdir(self.base_directory)):
|
||||||
|
raise Exception("Directory must be empty")
|
||||||
|
|
||||||
|
def write_events(self, room_id, events):
|
||||||
|
room_directory = os.path.join(self.base_directory, "rooms", room_id)
|
||||||
|
os.makedirs(room_directory, exist_ok=True)
|
||||||
|
events_file = os.path.join(room_directory, "events")
|
||||||
|
|
||||||
|
with open(events_file, "a") as f:
|
||||||
|
for event in events:
|
||||||
|
print(json.dumps(event.get_pdu_json()), file=f)
|
||||||
|
|
||||||
|
def write_state(self, room_id, event_id, state):
|
||||||
|
room_directory = os.path.join(self.base_directory, "rooms", room_id)
|
||||||
|
state_directory = os.path.join(room_directory, "state")
|
||||||
|
os.makedirs(state_directory, exist_ok=True)
|
||||||
|
|
||||||
|
event_file = os.path.join(state_directory, event_id)
|
||||||
|
|
||||||
|
with open(event_file, "a") as f:
|
||||||
|
for event in state.values():
|
||||||
|
print(json.dumps(event.get_pdu_json()), file=f)
|
||||||
|
|
||||||
|
def write_invite(self, room_id, event, state):
|
||||||
|
self.write_events(room_id, [event])
|
||||||
|
|
||||||
|
# We write the invite state somewhere else as they aren't full events
|
||||||
|
# and are only a subset of the state at the event.
|
||||||
|
room_directory = os.path.join(self.base_directory, "rooms", room_id)
|
||||||
|
os.makedirs(room_directory, exist_ok=True)
|
||||||
|
|
||||||
|
invite_state = os.path.join(room_directory, "invite_state")
|
||||||
|
|
||||||
|
with open(invite_state, "a") as f:
|
||||||
|
for event in state.values():
|
||||||
|
print(json.dumps(event), file=f)
|
||||||
|
|
||||||
|
def finished(self):
|
||||||
|
return self.base_directory
|
||||||
|
|
||||||
|
|
||||||
|
def start(config_options):
|
||||||
|
parser = argparse.ArgumentParser(description="Synapse Admin Command")
|
||||||
|
HomeServerConfig.add_arguments_to_parser(parser)
|
||||||
|
|
||||||
|
subparser = parser.add_subparsers(
|
||||||
|
title="Admin Commands",
|
||||||
|
required=True,
|
||||||
|
dest="command",
|
||||||
|
metavar="<admin_command>",
|
||||||
|
help="The admin command to perform.",
|
||||||
|
)
|
||||||
|
export_data_parser = subparser.add_parser(
|
||||||
|
"export-data", help="Export all data for a user"
|
||||||
|
)
|
||||||
|
export_data_parser.add_argument("user_id", help="User to extra data from")
|
||||||
|
export_data_parser.add_argument(
|
||||||
|
"--output-directory",
|
||||||
|
action="store",
|
||||||
|
metavar="DIRECTORY",
|
||||||
|
required=False,
|
||||||
|
help="The directory to store the exported data in. Must be empty. Defaults"
|
||||||
|
" to creating a temp directory.",
|
||||||
|
)
|
||||||
|
export_data_parser.set_defaults(func=export_data_command)
|
||||||
|
|
||||||
|
try:
|
||||||
|
config, args = HomeServerConfig.load_config_with_parser(parser, config_options)
|
||||||
|
except ConfigError as e:
|
||||||
|
sys.stderr.write("\n" + str(e) + "\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if config.worker_app is not None:
|
||||||
|
assert config.worker_app == "synapse.app.admin_cmd"
|
||||||
|
|
||||||
|
# Update the config with some basic overrides so that don't have to specify
|
||||||
|
# a full worker config.
|
||||||
|
config.worker_app = "synapse.app.admin_cmd"
|
||||||
|
|
||||||
|
if (
|
||||||
|
not config.worker_daemonize
|
||||||
|
and not config.worker_log_file
|
||||||
|
and not config.worker_log_config
|
||||||
|
):
|
||||||
|
# Since we're meant to be run as a "command" let's not redirect stdio
|
||||||
|
# unless we've actually set log config.
|
||||||
|
config.no_redirect_stdio = True
|
||||||
|
|
||||||
|
# Explicitly disable background processes
|
||||||
|
config.update_user_directory = False
|
||||||
|
config.start_pushers = False
|
||||||
|
config.send_federation = False
|
||||||
|
|
||||||
|
setup_logging(config, use_worker_options=True)
|
||||||
|
|
||||||
|
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||||
|
|
||||||
|
database_engine = create_engine(config.database_config)
|
||||||
|
|
||||||
|
ss = AdminCmdServer(
|
||||||
|
config.server_name,
|
||||||
|
db_config=config.database_config,
|
||||||
|
config=config,
|
||||||
|
version_string="Synapse/" + get_version_string(synapse),
|
||||||
|
database_engine=database_engine,
|
||||||
|
)
|
||||||
|
|
||||||
|
ss.setup()
|
||||||
|
|
||||||
|
# We use task.react as the basic run command as it correctly handles tearing
|
||||||
|
# down the reactor when the deferreds resolve and setting the return value.
|
||||||
|
# We also make sure that `_base.start` gets run before we actually run the
|
||||||
|
# command.
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def run(_reactor):
|
||||||
|
with LoggingContext("command"):
|
||||||
|
yield _base.start(ss, [])
|
||||||
|
yield args.func(ss, args)
|
||||||
|
|
||||||
|
_base.start_worker_reactor(
|
||||||
|
"synapse-admin-cmd", config, run_command=lambda: task.react(run)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
with LoggingContext("main"):
|
||||||
|
start(sys.argv[1:])
|
||||||
@@ -26,8 +26,8 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
@@ -36,7 +36,6 @@ from synapse.replication.tcp.client import ReplicationClientHandler
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -169,7 +168,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ps.setup()
|
ps.setup()
|
||||||
reactor.callWhenRunning(_base.start, ps, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ps, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-appservice", config)
|
_base.start_worker_reactor("synapse-appservice", config)
|
||||||
|
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
@@ -64,7 +64,6 @@ from synapse.rest.client.versions import VersionsRestServlet
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -195,7 +194,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-client-reader", config)
|
_base.start_worker_reactor("synapse-client-reader", config)
|
||||||
|
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
@@ -59,7 +59,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.user_directory import UserDirectoryStore
|
from synapse.storage.user_directory import UserDirectoryStore
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -194,7 +193,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-event-creator", config)
|
_base.start_worker_reactor("synapse-event-creator", config)
|
||||||
|
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.federation.transport.server import TransportLayerServer
|
from synapse.federation.transport.server import TransportLayerServer
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
@@ -48,7 +48,6 @@ from synapse.rest.key.v2 import KeyApiV2Resource
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -176,7 +175,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-federation-reader", config)
|
_base.start_worker_reactor("synapse-federation-reader", config)
|
||||||
|
|
||||||
|
|||||||
@@ -27,9 +27,9 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.federation import send_queue
|
from synapse.federation import send_queue
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
|
||||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
@@ -44,7 +44,6 @@ from synapse.storage.engines import create_engine
|
|||||||
from synapse.types import ReadReceipt
|
from synapse.types import ReadReceipt
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -199,7 +198,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-federation-sender", config)
|
_base.start_worker_reactor("synapse-federation-sender", config)
|
||||||
|
|
||||||
|
|||||||
@@ -29,8 +29,8 @@ from synapse.config.logger import setup_logging
|
|||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
@@ -41,7 +41,6 @@ from synapse.rest.client.v2_alpha._base import client_patterns
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -71,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet):
|
|||||||
except HttpResponseException as e:
|
except HttpResponseException as e:
|
||||||
raise e.to_synapse_error()
|
raise e.to_synapse_error()
|
||||||
|
|
||||||
defer.returnValue((200, result))
|
return (200, result)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_PUT(self, request, user_id):
|
def on_PUT(self, request, user_id):
|
||||||
yield self.auth.get_user_by_req(request)
|
yield self.auth.get_user_by_req(request)
|
||||||
defer.returnValue((200, {}))
|
return (200, {})
|
||||||
|
|
||||||
|
|
||||||
class KeyUploadServlet(RestServlet):
|
class KeyUploadServlet(RestServlet):
|
||||||
@@ -127,11 +126,11 @@ class KeyUploadServlet(RestServlet):
|
|||||||
self.main_uri + request.uri.decode("ascii"), body, headers=headers
|
self.main_uri + request.uri.decode("ascii"), body, headers=headers
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue((200, result))
|
return (200, result)
|
||||||
else:
|
else:
|
||||||
# Just interested in counts.
|
# Just interested in counts.
|
||||||
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
|
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
|
||||||
defer.returnValue((200, {"one_time_key_counts": result}))
|
return (200, {"one_time_key_counts": result})
|
||||||
|
|
||||||
|
|
||||||
class FrontendProxySlavedStore(
|
class FrontendProxySlavedStore(
|
||||||
@@ -248,7 +247,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-frontend-proxy", config)
|
_base.start_worker_reactor("synapse-frontend-proxy", config)
|
||||||
|
|
||||||
|
|||||||
Executable → Regular
+3
-4
@@ -54,9 +54,9 @@ from synapse.federation.transport.server import TransportLayerServer
|
|||||||
from synapse.http.additional_resource import AdditionalResource
|
from synapse.http.additional_resource import AdditionalResource
|
||||||
from synapse.http.server import RootRedirect
|
from synapse.http.server import RootRedirect
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext
|
||||||
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
|
||||||
from synapse.module_api import ModuleApi
|
from synapse.module_api import ModuleApi
|
||||||
from synapse.python_dependencies import check_requirements
|
from synapse.python_dependencies import check_requirements
|
||||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||||
@@ -72,7 +72,6 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
|
|||||||
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
||||||
from synapse.util.caches import CACHE_SIZE_FACTOR
|
from synapse.util.caches import CACHE_SIZE_FACTOR
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.module_loader import load_module
|
from synapse.util.module_loader import load_module
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.util.rlimit import change_resource_limit
|
||||||
@@ -407,7 +406,7 @@ def setup(config_options):
|
|||||||
if provision:
|
if provision:
|
||||||
yield acme.provision_certificate()
|
yield acme.provision_certificate()
|
||||||
|
|
||||||
defer.returnValue(provision)
|
return provision
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def reprovision_acme():
|
def reprovision_acme():
|
||||||
|
|||||||
@@ -26,21 +26,22 @@ from synapse.app import _base
|
|||||||
from synapse.config._base import ConfigError
|
from synapse.config._base import ConfigError
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||||
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
|
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
|
||||||
from synapse.replication.tcp.client import ReplicationClientHandler
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||||
|
from synapse.rest.admin import register_servlets_for_media_repo
|
||||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.media_repository import MediaRepositoryStore
|
from synapse.storage.media_repository import MediaRepositoryStore
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -72,6 +73,12 @@ class MediaRepositoryServer(HomeServer):
|
|||||||
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
|
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
|
||||||
elif name == "media":
|
elif name == "media":
|
||||||
media_repo = self.get_media_repository_resource()
|
media_repo = self.get_media_repository_resource()
|
||||||
|
|
||||||
|
# We need to serve the admin servlets for media on the
|
||||||
|
# worker.
|
||||||
|
admin_resource = JsonResource(self, canonical_json=False)
|
||||||
|
register_servlets_for_media_repo(self, admin_resource)
|
||||||
|
|
||||||
resources.update(
|
resources.update(
|
||||||
{
|
{
|
||||||
MEDIA_PREFIX: media_repo,
|
MEDIA_PREFIX: media_repo,
|
||||||
@@ -79,6 +86,7 @@ class MediaRepositoryServer(HomeServer):
|
|||||||
CONTENT_REPO_PREFIX: ContentRepoResource(
|
CONTENT_REPO_PREFIX: ContentRepoResource(
|
||||||
self, self.config.uploads_path
|
self, self.config.uploads_path
|
||||||
),
|
),
|
||||||
|
"/_synapse/admin": admin_resource,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -162,7 +170,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-media-repository", config)
|
_base.start_worker_reactor("synapse-media-repository", config)
|
||||||
|
|
||||||
|
|||||||
@@ -26,8 +26,8 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import __func__
|
from synapse.replication.slave.storage._base import __func__
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
@@ -38,7 +38,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage import DataStore
|
from synapse.storage import DataStore
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -217,7 +216,7 @@ def start(config_options):
|
|||||||
_base.start(ps, config.worker_listeners)
|
_base.start(ps, config.worker_listeners)
|
||||||
ps.get_pusherpool().start()
|
ps.get_pusherpool().start()
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
reactor.addSystemEventTrigger("before", "startup", start)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-pusher", config)
|
_base.start_worker_reactor("synapse-pusher", config)
|
||||||
|
|
||||||
|
|||||||
@@ -31,8 +31,8 @@ from synapse.config.logger import setup_logging
|
|||||||
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
||||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
@@ -57,7 +57,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.presence import UserPresenceState
|
from synapse.storage.presence import UserPresenceState
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.stringutils import random_string
|
from synapse.util.stringutils import random_string
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
@@ -452,7 +451,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-synchrotron", config)
|
_base.start_worker_reactor("synapse-synchrotron", config)
|
||||||
|
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
@@ -46,7 +46,6 @@ from synapse.storage.engines import create_engine
|
|||||||
from synapse.storage.user_directory import UserDirectoryStore
|
from synapse.storage.user_directory import UserDirectoryStore
|
||||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
@@ -225,7 +224,9 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
reactor.callWhenRunning(_base.start, ss, config.worker_listeners)
|
reactor.addSystemEventTrigger(
|
||||||
|
"before", "startup", _base.start, ss, config.worker_listeners
|
||||||
|
)
|
||||||
|
|
||||||
_base.start_worker_reactor("synapse-user-dir", config)
|
_base.start_worker_reactor("synapse-user-dir", config)
|
||||||
|
|
||||||
|
|||||||
@@ -175,21 +175,21 @@ class ApplicationService(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _matches_user(self, event, store):
|
def _matches_user(self, event, store):
|
||||||
if not event:
|
if not event:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
if self.is_interested_in_user(event.sender):
|
if self.is_interested_in_user(event.sender):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
# also check m.room.member state key
|
# also check m.room.member state key
|
||||||
if event.type == EventTypes.Member and self.is_interested_in_user(
|
if event.type == EventTypes.Member and self.is_interested_in_user(
|
||||||
event.state_key
|
event.state_key
|
||||||
):
|
):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
if not store:
|
if not store:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
does_match = yield self._matches_user_in_member_list(event.room_id, store)
|
does_match = yield self._matches_user_in_member_list(event.room_id, store)
|
||||||
defer.returnValue(does_match)
|
return does_match
|
||||||
|
|
||||||
@cachedInlineCallbacks(num_args=1, cache_context=True)
|
@cachedInlineCallbacks(num_args=1, cache_context=True)
|
||||||
def _matches_user_in_member_list(self, room_id, store, cache_context):
|
def _matches_user_in_member_list(self, room_id, store, cache_context):
|
||||||
@@ -200,8 +200,8 @@ class ApplicationService(object):
|
|||||||
# check joined member events
|
# check joined member events
|
||||||
for user_id in member_list:
|
for user_id in member_list:
|
||||||
if self.is_interested_in_user(user_id):
|
if self.is_interested_in_user(user_id):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
def _matches_room_id(self, event):
|
def _matches_room_id(self, event):
|
||||||
if hasattr(event, "room_id"):
|
if hasattr(event, "room_id"):
|
||||||
@@ -211,13 +211,13 @@ class ApplicationService(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _matches_aliases(self, event, store):
|
def _matches_aliases(self, event, store):
|
||||||
if not store or not event:
|
if not store or not event:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
alias_list = yield store.get_aliases_for_room(event.room_id)
|
alias_list = yield store.get_aliases_for_room(event.room_id)
|
||||||
for alias in alias_list:
|
for alias in alias_list:
|
||||||
if self.is_interested_in_alias(alias):
|
if self.is_interested_in_alias(alias):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def is_interested(self, event, store=None):
|
def is_interested(self, event, store=None):
|
||||||
@@ -231,15 +231,15 @@ class ApplicationService(object):
|
|||||||
"""
|
"""
|
||||||
# Do cheap checks first
|
# Do cheap checks first
|
||||||
if self._matches_room_id(event):
|
if self._matches_room_id(event):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
if (yield self._matches_aliases(event, store)):
|
if (yield self._matches_aliases(event, store)):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
if (yield self._matches_user(event, store)):
|
if (yield self._matches_user(event, store)):
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
def is_interested_in_user(self, user_id):
|
def is_interested_in_user(self, user_id):
|
||||||
return (
|
return (
|
||||||
|
|||||||
+19
-19
@@ -97,40 +97,40 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def query_user(self, service, user_id):
|
def query_user(self, service, user_id):
|
||||||
if service.url is None:
|
if service.url is None:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
|
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
|
||||||
response = None
|
response = None
|
||||||
try:
|
try:
|
||||||
response = yield self.get_json(uri, {"access_token": service.hs_token})
|
response = yield self.get_json(uri, {"access_token": service.hs_token})
|
||||||
if response is not None: # just an empty json object
|
if response is not None: # just an empty json object
|
||||||
defer.returnValue(True)
|
return True
|
||||||
except CodeMessageException as e:
|
except CodeMessageException as e:
|
||||||
if e.code == 404:
|
if e.code == 404:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
return
|
return
|
||||||
logger.warning("query_user to %s received %s", uri, e.code)
|
logger.warning("query_user to %s received %s", uri, e.code)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
logger.warning("query_user to %s threw exception %s", uri, ex)
|
logger.warning("query_user to %s threw exception %s", uri, ex)
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def query_alias(self, service, alias):
|
def query_alias(self, service, alias):
|
||||||
if service.url is None:
|
if service.url is None:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
|
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
|
||||||
response = None
|
response = None
|
||||||
try:
|
try:
|
||||||
response = yield self.get_json(uri, {"access_token": service.hs_token})
|
response = yield self.get_json(uri, {"access_token": service.hs_token})
|
||||||
if response is not None: # just an empty json object
|
if response is not None: # just an empty json object
|
||||||
defer.returnValue(True)
|
return True
|
||||||
except CodeMessageException as e:
|
except CodeMessageException as e:
|
||||||
logger.warning("query_alias to %s received %s", uri, e.code)
|
logger.warning("query_alias to %s received %s", uri, e.code)
|
||||||
if e.code == 404:
|
if e.code == 404:
|
||||||
defer.returnValue(False)
|
return False
|
||||||
return
|
return
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
logger.warning("query_alias to %s threw exception %s", uri, ex)
|
logger.warning("query_alias to %s threw exception %s", uri, ex)
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def query_3pe(self, service, kind, protocol, fields):
|
def query_3pe(self, service, kind, protocol, fields):
|
||||||
@@ -141,7 +141,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
else:
|
else:
|
||||||
raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind)
|
raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind)
|
||||||
if service.url is None:
|
if service.url is None:
|
||||||
defer.returnValue([])
|
return []
|
||||||
|
|
||||||
uri = "%s%s/thirdparty/%s/%s" % (
|
uri = "%s%s/thirdparty/%s/%s" % (
|
||||||
service.url,
|
service.url,
|
||||||
@@ -155,7 +155,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
logger.warning(
|
logger.warning(
|
||||||
"query_3pe to %s returned an invalid response %r", uri, response
|
"query_3pe to %s returned an invalid response %r", uri, response
|
||||||
)
|
)
|
||||||
defer.returnValue([])
|
return []
|
||||||
|
|
||||||
ret = []
|
ret = []
|
||||||
for r in response:
|
for r in response:
|
||||||
@@ -166,14 +166,14 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
"query_3pe to %s returned an invalid result %r", uri, r
|
"query_3pe to %s returned an invalid result %r", uri, r
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
logger.warning("query_3pe to %s threw exception %s", uri, ex)
|
logger.warning("query_3pe to %s threw exception %s", uri, ex)
|
||||||
defer.returnValue([])
|
return []
|
||||||
|
|
||||||
def get_3pe_protocol(self, service, protocol):
|
def get_3pe_protocol(self, service, protocol):
|
||||||
if service.url is None:
|
if service.url is None:
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get():
|
def _get():
|
||||||
@@ -189,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
logger.warning(
|
logger.warning(
|
||||||
"query_3pe_protocol to %s did not return a" " valid result", uri
|
"query_3pe_protocol to %s did not return a" " valid result", uri
|
||||||
)
|
)
|
||||||
defer.returnValue(None)
|
return None
|
||||||
|
|
||||||
for instance in info.get("instances", []):
|
for instance in info.get("instances", []):
|
||||||
network_id = instance.get("network_id", None)
|
network_id = instance.get("network_id", None)
|
||||||
@@ -198,10 +198,10 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
service.id, network_id
|
service.id, network_id
|
||||||
).to_string()
|
).to_string()
|
||||||
|
|
||||||
defer.returnValue(info)
|
return info
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
|
logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
|
||||||
defer.returnValue(None)
|
return None
|
||||||
|
|
||||||
key = (service.id, protocol)
|
key = (service.id, protocol)
|
||||||
return self.protocol_meta_cache.wrap(key, _get)
|
return self.protocol_meta_cache.wrap(key, _get)
|
||||||
@@ -209,7 +209,7 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def push_bulk(self, service, events, txn_id=None):
|
def push_bulk(self, service, events, txn_id=None):
|
||||||
if service.url is None:
|
if service.url is None:
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
events = self._serialize(events)
|
events = self._serialize(events)
|
||||||
|
|
||||||
@@ -229,14 +229,14 @@ class ApplicationServiceApi(SimpleHttpClient):
|
|||||||
)
|
)
|
||||||
sent_transactions_counter.labels(service.id).inc()
|
sent_transactions_counter.labels(service.id).inc()
|
||||||
sent_events_counter.labels(service.id).inc(len(events))
|
sent_events_counter.labels(service.id).inc(len(events))
|
||||||
defer.returnValue(True)
|
return True
|
||||||
return
|
return
|
||||||
except CodeMessageException as e:
|
except CodeMessageException as e:
|
||||||
logger.warning("push_bulk to %s received %s", uri, e.code)
|
logger.warning("push_bulk to %s received %s", uri, e.code)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
logger.warning("push_bulk to %s threw exception %s", uri, ex)
|
logger.warning("push_bulk to %s threw exception %s", uri, ex)
|
||||||
failed_transactions_counter.labels(service.id).inc()
|
failed_transactions_counter.labels(service.id).inc()
|
||||||
defer.returnValue(False)
|
return False
|
||||||
|
|
||||||
def _serialize(self, events):
|
def _serialize(self, events):
|
||||||
time_now = self.clock.time_msec()
|
time_now = self.clock.time_msec()
|
||||||
|
|||||||
@@ -53,8 +53,8 @@ import logging
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.appservice import ApplicationServiceState
|
from synapse.appservice import ApplicationServiceState
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -193,7 +193,7 @@ class _TransactionController(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _is_service_up(self, service):
|
def _is_service_up(self, service):
|
||||||
state = yield self.store.get_appservice_state(service)
|
state = yield self.store.get_appservice_state(service)
|
||||||
defer.returnValue(state == ApplicationServiceState.UP or state is None)
|
return state == ApplicationServiceState.UP or state is None
|
||||||
|
|
||||||
|
|
||||||
class _Recoverer(object):
|
class _Recoverer(object):
|
||||||
@@ -208,7 +208,7 @@ class _Recoverer(object):
|
|||||||
r.service.id,
|
r.service.id,
|
||||||
)
|
)
|
||||||
r.recover()
|
r.recover()
|
||||||
defer.returnValue(recoverers)
|
return recoverers
|
||||||
|
|
||||||
def __init__(self, clock, store, as_api, service, callback):
|
def __init__(self, clock, store, as_api, service, callback):
|
||||||
self.clock = clock
|
self.clock = clock
|
||||||
|
|||||||
+71
-6
@@ -137,12 +137,42 @@ class Config(object):
|
|||||||
return file_stream.read()
|
return file_stream.read()
|
||||||
|
|
||||||
def invoke_all(self, name, *args, **kargs):
|
def invoke_all(self, name, *args, **kargs):
|
||||||
|
"""Invoke all instance methods with the given name and arguments in the
|
||||||
|
class's MRO.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name (str): Name of function to invoke
|
||||||
|
*args
|
||||||
|
**kwargs
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list: The list of the return values from each method called
|
||||||
|
"""
|
||||||
results = []
|
results = []
|
||||||
for cls in type(self).mro():
|
for cls in type(self).mro():
|
||||||
if name in cls.__dict__:
|
if name in cls.__dict__:
|
||||||
results.append(getattr(cls, name)(self, *args, **kargs))
|
results.append(getattr(cls, name)(self, *args, **kargs))
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def invoke_all_static(cls, name, *args, **kargs):
|
||||||
|
"""Invoke all static methods with the given name and arguments in the
|
||||||
|
class's MRO.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name (str): Name of function to invoke
|
||||||
|
*args
|
||||||
|
**kwargs
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list: The list of the return values from each method called
|
||||||
|
"""
|
||||||
|
results = []
|
||||||
|
for c in cls.mro():
|
||||||
|
if name in c.__dict__:
|
||||||
|
results.append(getattr(c, name)(*args, **kargs))
|
||||||
|
return results
|
||||||
|
|
||||||
def generate_config(
|
def generate_config(
|
||||||
self,
|
self,
|
||||||
config_dir_path,
|
config_dir_path,
|
||||||
@@ -202,6 +232,23 @@ class Config(object):
|
|||||||
Returns: Config object.
|
Returns: Config object.
|
||||||
"""
|
"""
|
||||||
config_parser = argparse.ArgumentParser(description=description)
|
config_parser = argparse.ArgumentParser(description=description)
|
||||||
|
cls.add_arguments_to_parser(config_parser)
|
||||||
|
obj, _ = cls.load_config_with_parser(config_parser, argv)
|
||||||
|
|
||||||
|
return obj
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def add_arguments_to_parser(cls, config_parser):
|
||||||
|
"""Adds all the config flags to an ArgumentParser.
|
||||||
|
|
||||||
|
Doesn't support config-file-generation: used by the worker apps.
|
||||||
|
|
||||||
|
Used for workers where we want to add extra flags/subcommands.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_parser (ArgumentParser): App description
|
||||||
|
"""
|
||||||
|
|
||||||
config_parser.add_argument(
|
config_parser.add_argument(
|
||||||
"-c",
|
"-c",
|
||||||
"--config-path",
|
"--config-path",
|
||||||
@@ -219,16 +266,34 @@ class Config(object):
|
|||||||
" Defaults to the directory containing the last config file",
|
" Defaults to the directory containing the last config file",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
cls.invoke_all_static("add_arguments", config_parser)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def load_config_with_parser(cls, parser, argv):
|
||||||
|
"""Parse the commandline and config files with the given parser
|
||||||
|
|
||||||
|
Doesn't support config-file-generation: used by the worker apps.
|
||||||
|
|
||||||
|
Used for workers where we want to add extra flags/subcommands.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
parser (ArgumentParser)
|
||||||
|
argv (list[str])
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed
|
||||||
|
config object and the parsed argparse.Namespace object from
|
||||||
|
`parser.parse_args(..)`
|
||||||
|
"""
|
||||||
|
|
||||||
obj = cls()
|
obj = cls()
|
||||||
|
|
||||||
obj.invoke_all("add_arguments", config_parser)
|
config_args = parser.parse_args(argv)
|
||||||
|
|
||||||
config_args = config_parser.parse_args(argv)
|
|
||||||
|
|
||||||
config_files = find_config_files(search_paths=config_args.config_path)
|
config_files = find_config_files(search_paths=config_args.config_path)
|
||||||
|
|
||||||
if not config_files:
|
if not config_files:
|
||||||
config_parser.error("Must supply a config file.")
|
parser.error("Must supply a config file.")
|
||||||
|
|
||||||
if config_args.keys_directory:
|
if config_args.keys_directory:
|
||||||
config_dir_path = config_args.keys_directory
|
config_dir_path = config_args.keys_directory
|
||||||
@@ -244,7 +309,7 @@ class Config(object):
|
|||||||
|
|
||||||
obj.invoke_all("read_arguments", config_args)
|
obj.invoke_all("read_arguments", config_args)
|
||||||
|
|
||||||
return obj
|
return obj, config_args
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def load_or_generate_config(cls, description, argv):
|
def load_or_generate_config(cls, description, argv):
|
||||||
@@ -401,7 +466,7 @@ class Config(object):
|
|||||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||||
)
|
)
|
||||||
|
|
||||||
obj.invoke_all("add_arguments", parser)
|
obj.invoke_all_static("add_arguments", parser)
|
||||||
args = parser.parse_args(remaining_args)
|
args = parser.parse_args(remaining_args)
|
||||||
|
|
||||||
config_dict = read_config_files(config_files)
|
config_dict = read_config_files(config_files)
|
||||||
|
|||||||
@@ -69,7 +69,8 @@ class DatabaseConfig(Config):
|
|||||||
if database_path is not None:
|
if database_path is not None:
|
||||||
self.database_config["args"]["database"] = database_path
|
self.database_config["args"]["database"] = database_path
|
||||||
|
|
||||||
def add_arguments(self, parser):
|
@staticmethod
|
||||||
|
def add_arguments(parser):
|
||||||
db_group = parser.add_argument_group("database")
|
db_group = parser.add_argument_group("database")
|
||||||
db_group.add_argument(
|
db_group.add_argument(
|
||||||
"-d",
|
"-d",
|
||||||
|
|||||||
@@ -112,13 +112,17 @@ class EmailConfig(Config):
|
|||||||
missing = []
|
missing = []
|
||||||
for k in required:
|
for k in required:
|
||||||
if k not in email_config:
|
if k not in email_config:
|
||||||
missing.append(k)
|
missing.append("email." + k)
|
||||||
|
|
||||||
|
if config.get("public_baseurl") is None:
|
||||||
|
missing.append("public_base_url")
|
||||||
|
|
||||||
if len(missing) > 0:
|
if len(missing) > 0:
|
||||||
raise RuntimeError(
|
raise RuntimeError(
|
||||||
"email.password_reset_behaviour is set to 'local' "
|
"Password resets emails are configured to be sent from "
|
||||||
"but required keys are missing: %s"
|
"this homeserver due to a partial 'email' block. "
|
||||||
% (", ".join(["email." + k for k in missing]),)
|
"However, the following required keys are missing: %s"
|
||||||
|
% (", ".join(missing),)
|
||||||
)
|
)
|
||||||
|
|
||||||
# Templates for password reset emails
|
# Templates for password reset emails
|
||||||
@@ -156,13 +160,6 @@ class EmailConfig(Config):
|
|||||||
filepath, "email.password_reset_template_success_html"
|
filepath, "email.password_reset_template_success_html"
|
||||||
)
|
)
|
||||||
|
|
||||||
if config.get("public_baseurl") is None:
|
|
||||||
raise RuntimeError(
|
|
||||||
"email.password_reset_behaviour is set to 'local' but no "
|
|
||||||
"public_baseurl is set. This is necessary to generate password "
|
|
||||||
"reset links"
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.email_enable_notifs:
|
if self.email_enable_notifs:
|
||||||
required = [
|
required = [
|
||||||
"smtp_host",
|
"smtp_host",
|
||||||
|
|||||||
@@ -40,6 +40,7 @@ from .spam_checker import SpamCheckerConfig
|
|||||||
from .stats import StatsConfig
|
from .stats import StatsConfig
|
||||||
from .third_party_event_rules import ThirdPartyRulesConfig
|
from .third_party_event_rules import ThirdPartyRulesConfig
|
||||||
from .tls import TlsConfig
|
from .tls import TlsConfig
|
||||||
|
from .tracer import TracerConfig
|
||||||
from .user_directory import UserDirectoryConfig
|
from .user_directory import UserDirectoryConfig
|
||||||
from .voip import VoipConfig
|
from .voip import VoipConfig
|
||||||
from .workers import WorkerConfig
|
from .workers import WorkerConfig
|
||||||
@@ -75,5 +76,6 @@ class HomeServerConfig(
|
|||||||
ServerNoticesConfig,
|
ServerNoticesConfig,
|
||||||
RoomDirectoryConfig,
|
RoomDirectoryConfig,
|
||||||
ThirdPartyRulesConfig,
|
ThirdPartyRulesConfig,
|
||||||
|
TracerConfig,
|
||||||
):
|
):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -116,8 +116,6 @@ class KeyConfig(Config):
|
|||||||
seed = bytes(self.signing_key[0])
|
seed = bytes(self.signing_key[0])
|
||||||
self.macaroon_secret_key = hashlib.sha256(seed).digest()
|
self.macaroon_secret_key = hashlib.sha256(seed).digest()
|
||||||
|
|
||||||
self.expire_access_token = config.get("expire_access_token", False)
|
|
||||||
|
|
||||||
# a secret which is used to calculate HMACs for form values, to stop
|
# a secret which is used to calculate HMACs for form values, to stop
|
||||||
# falsification of values
|
# falsification of values
|
||||||
self.form_secret = config.get("form_secret", None)
|
self.form_secret = config.get("form_secret", None)
|
||||||
@@ -144,10 +142,6 @@ class KeyConfig(Config):
|
|||||||
#
|
#
|
||||||
%(macaroon_secret_key)s
|
%(macaroon_secret_key)s
|
||||||
|
|
||||||
# Used to enable access token expiration.
|
|
||||||
#
|
|
||||||
#expire_access_token: False
|
|
||||||
|
|
||||||
# a secret which is used to calculate HMACs for form values, to stop
|
# a secret which is used to calculate HMACs for form values, to stop
|
||||||
# falsification of values. Must be specified for the User Consent
|
# falsification of values. Must be specified for the User Consent
|
||||||
# forms to work.
|
# forms to work.
|
||||||
|
|||||||
+16
-72
@@ -12,6 +12,7 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
import logging.config
|
import logging.config
|
||||||
import os
|
import os
|
||||||
@@ -24,7 +25,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner
|
|||||||
|
|
||||||
import synapse
|
import synapse
|
||||||
from synapse.app import _base as appbase
|
from synapse.app import _base as appbase
|
||||||
from synapse.util.logcontext import LoggingContextFilter
|
from synapse.logging.context import LoggingContextFilter
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
from ._base import Config
|
from ._base import Config
|
||||||
@@ -40,7 +41,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
@@ -75,10 +76,8 @@ root:
|
|||||||
|
|
||||||
class LoggingConfig(Config):
|
class LoggingConfig(Config):
|
||||||
def read_config(self, config, **kwargs):
|
def read_config(self, config, **kwargs):
|
||||||
self.verbosity = config.get("verbose", 0)
|
|
||||||
self.no_redirect_stdio = config.get("no_redirect_stdio", False)
|
|
||||||
self.log_config = self.abspath(config.get("log_config"))
|
self.log_config = self.abspath(config.get("log_config"))
|
||||||
self.log_file = self.abspath(config.get("log_file"))
|
self.no_redirect_stdio = config.get("no_redirect_stdio", False)
|
||||||
|
|
||||||
def generate_config_section(self, config_dir_path, server_name, **kwargs):
|
def generate_config_section(self, config_dir_path, server_name, **kwargs):
|
||||||
log_config = os.path.join(config_dir_path, server_name + ".log.config")
|
log_config = os.path.join(config_dir_path, server_name + ".log.config")
|
||||||
@@ -94,37 +93,12 @@ class LoggingConfig(Config):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def read_arguments(self, args):
|
def read_arguments(self, args):
|
||||||
if args.verbose is not None:
|
|
||||||
self.verbosity = args.verbose
|
|
||||||
if args.no_redirect_stdio is not None:
|
if args.no_redirect_stdio is not None:
|
||||||
self.no_redirect_stdio = args.no_redirect_stdio
|
self.no_redirect_stdio = args.no_redirect_stdio
|
||||||
if args.log_config is not None:
|
|
||||||
self.log_config = args.log_config
|
|
||||||
if args.log_file is not None:
|
|
||||||
self.log_file = args.log_file
|
|
||||||
|
|
||||||
def add_arguments(cls, parser):
|
@staticmethod
|
||||||
|
def add_arguments(parser):
|
||||||
logging_group = parser.add_argument_group("logging")
|
logging_group = parser.add_argument_group("logging")
|
||||||
logging_group.add_argument(
|
|
||||||
"-v",
|
|
||||||
"--verbose",
|
|
||||||
dest="verbose",
|
|
||||||
action="count",
|
|
||||||
help="The verbosity level. Specify multiple times to increase "
|
|
||||||
"verbosity. (Ignored if --log-config is specified.)",
|
|
||||||
)
|
|
||||||
logging_group.add_argument(
|
|
||||||
"-f",
|
|
||||||
"--log-file",
|
|
||||||
dest="log_file",
|
|
||||||
help="File to log to. (Ignored if --log-config is specified.)",
|
|
||||||
)
|
|
||||||
logging_group.add_argument(
|
|
||||||
"--log-config",
|
|
||||||
dest="log_config",
|
|
||||||
default=None,
|
|
||||||
help="Python logging config file",
|
|
||||||
)
|
|
||||||
logging_group.add_argument(
|
logging_group.add_argument(
|
||||||
"-n",
|
"-n",
|
||||||
"--no-redirect-stdio",
|
"--no-redirect-stdio",
|
||||||
@@ -152,58 +126,29 @@ def setup_logging(config, use_worker_options=False):
|
|||||||
config (LoggingConfig | synapse.config.workers.WorkerConfig):
|
config (LoggingConfig | synapse.config.workers.WorkerConfig):
|
||||||
configuration data
|
configuration data
|
||||||
|
|
||||||
use_worker_options (bool): True to use 'worker_log_config' and
|
use_worker_options (bool): True to use the 'worker_log_config' option
|
||||||
'worker_log_file' options instead of 'log_config' and 'log_file'.
|
instead of 'log_config'.
|
||||||
|
|
||||||
register_sighup (func | None): Function to call to register a
|
register_sighup (func | None): Function to call to register a
|
||||||
sighup handler.
|
sighup handler.
|
||||||
"""
|
"""
|
||||||
log_config = config.worker_log_config if use_worker_options else config.log_config
|
log_config = config.worker_log_config if use_worker_options else config.log_config
|
||||||
log_file = config.worker_log_file if use_worker_options else config.log_file
|
|
||||||
|
|
||||||
log_format = (
|
|
||||||
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
|
|
||||||
" - %(message)s"
|
|
||||||
)
|
|
||||||
|
|
||||||
if log_config is None:
|
if log_config is None:
|
||||||
# We don't have a logfile, so fall back to the 'verbosity' param from
|
log_format = (
|
||||||
# the config or cmdline. (Note that we generate a log config for new
|
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
|
||||||
# installs, so this will be an unusual case)
|
" - %(message)s"
|
||||||
level = logging.INFO
|
)
|
||||||
level_for_storage = logging.INFO
|
|
||||||
if config.verbosity:
|
|
||||||
level = logging.DEBUG
|
|
||||||
if config.verbosity > 1:
|
|
||||||
level_for_storage = logging.DEBUG
|
|
||||||
|
|
||||||
logger = logging.getLogger("")
|
logger = logging.getLogger("")
|
||||||
logger.setLevel(level)
|
logger.setLevel(logging.INFO)
|
||||||
|
logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)
|
||||||
logging.getLogger("synapse.storage.SQL").setLevel(level_for_storage)
|
|
||||||
|
|
||||||
formatter = logging.Formatter(log_format)
|
formatter = logging.Formatter(log_format)
|
||||||
if log_file:
|
|
||||||
# TODO: Customisable file size / backup count
|
|
||||||
handler = logging.handlers.RotatingFileHandler(
|
|
||||||
log_file, maxBytes=(1000 * 1000 * 100), backupCount=3, encoding="utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
def sighup(signum, stack):
|
|
||||||
logger.info("Closing log file due to SIGHUP")
|
|
||||||
handler.doRollover()
|
|
||||||
logger.info("Opened new log file due to SIGHUP")
|
|
||||||
|
|
||||||
else:
|
|
||||||
handler = logging.StreamHandler()
|
|
||||||
|
|
||||||
def sighup(*args):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
handler = logging.StreamHandler()
|
||||||
handler.setFormatter(formatter)
|
handler.setFormatter(formatter)
|
||||||
|
|
||||||
handler.addFilter(LoggingContextFilter(request=""))
|
handler.addFilter(LoggingContextFilter(request=""))
|
||||||
|
|
||||||
logger.addHandler(handler)
|
logger.addHandler(handler)
|
||||||
else:
|
else:
|
||||||
|
|
||||||
@@ -217,8 +162,7 @@ def setup_logging(config, use_worker_options=False):
|
|||||||
logging.info("Reloaded log config from %s due to SIGHUP", log_config)
|
logging.info("Reloaded log config from %s due to SIGHUP", log_config)
|
||||||
|
|
||||||
load_log_config()
|
load_log_config()
|
||||||
|
appbase.register_sighup(sighup)
|
||||||
appbase.register_sighup(sighup)
|
|
||||||
|
|
||||||
# make sure that the first thing we log is a thing we can grep backwards
|
# make sure that the first thing we log is a thing we can grep backwards
|
||||||
# for
|
# for
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ class RateLimitConfig(object):
|
|||||||
|
|
||||||
class FederationRateLimitConfig(object):
|
class FederationRateLimitConfig(object):
|
||||||
_items_and_default = {
|
_items_and_default = {
|
||||||
"window_size": 10000,
|
"window_size": 1000,
|
||||||
"sleep_limit": 10,
|
"sleep_limit": 10,
|
||||||
"sleep_delay": 500,
|
"sleep_delay": 500,
|
||||||
"reject_limit": 50,
|
"reject_limit": 50,
|
||||||
@@ -54,7 +54,7 @@ class RatelimitConfig(Config):
|
|||||||
|
|
||||||
# Load the new-style federation config, if it exists. Otherwise, fall
|
# Load the new-style federation config, if it exists. Otherwise, fall
|
||||||
# back to the old method.
|
# back to the old method.
|
||||||
if "federation_rc" in config:
|
if "rc_federation" in config:
|
||||||
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
|
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
|
||||||
else:
|
else:
|
||||||
self.rc_federation = FederationRateLimitConfig(
|
self.rc_federation = FederationRateLimitConfig(
|
||||||
@@ -80,6 +80,12 @@ class RatelimitConfig(Config):
|
|||||||
"federation_rr_transactions_per_room_per_second", 50
|
"federation_rr_transactions_per_room_per_second", 50
|
||||||
)
|
)
|
||||||
|
|
||||||
|
rc_admin_redaction = config.get("rc_admin_redaction")
|
||||||
|
if rc_admin_redaction:
|
||||||
|
self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
|
||||||
|
else:
|
||||||
|
self.rc_admin_redaction = None
|
||||||
|
|
||||||
def generate_config_section(self, **kwargs):
|
def generate_config_section(self, **kwargs):
|
||||||
return """\
|
return """\
|
||||||
## Ratelimiting ##
|
## Ratelimiting ##
|
||||||
@@ -102,6 +108,9 @@ class RatelimitConfig(Config):
|
|||||||
# - one for login that ratelimits login requests based on the account the
|
# - one for login that ratelimits login requests based on the account the
|
||||||
# client is attempting to log into, based on the amount of failed login
|
# client is attempting to log into, based on the amount of failed login
|
||||||
# attempts for this account.
|
# attempts for this account.
|
||||||
|
# - one for ratelimiting redactions by room admins. If this is not explicitly
|
||||||
|
# set then it uses the same ratelimiting as per rc_message. This is useful
|
||||||
|
# to allow room admins to deal with abuse quickly.
|
||||||
#
|
#
|
||||||
# The defaults are as shown below.
|
# The defaults are as shown below.
|
||||||
#
|
#
|
||||||
@@ -123,6 +132,10 @@ class RatelimitConfig(Config):
|
|||||||
# failed_attempts:
|
# failed_attempts:
|
||||||
# per_second: 0.17
|
# per_second: 0.17
|
||||||
# burst_count: 3
|
# burst_count: 3
|
||||||
|
#
|
||||||
|
#rc_admin_redaction:
|
||||||
|
# per_second: 1
|
||||||
|
# burst_count: 50
|
||||||
|
|
||||||
|
|
||||||
# Ratelimiting settings for incoming federation
|
# Ratelimiting settings for incoming federation
|
||||||
|
|||||||
@@ -13,8 +13,11 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
from distutils.util import strtobool
|
from distutils.util import strtobool
|
||||||
|
|
||||||
|
import pkg_resources
|
||||||
|
|
||||||
from synapse.config._base import Config, ConfigError
|
from synapse.config._base import Config, ConfigError
|
||||||
from synapse.types import RoomAlias
|
from synapse.types import RoomAlias
|
||||||
from synapse.util.stringutils import random_string_with_symbols
|
from synapse.util.stringutils import random_string_with_symbols
|
||||||
@@ -41,8 +44,36 @@ class AccountValidityConfig(Config):
|
|||||||
|
|
||||||
self.startup_job_max_delta = self.period * 10.0 / 100.0
|
self.startup_job_max_delta = self.period * 10.0 / 100.0
|
||||||
|
|
||||||
if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
|
if self.renew_by_email_enabled:
|
||||||
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
|
if "public_baseurl" not in synapse_config:
|
||||||
|
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
|
||||||
|
|
||||||
|
template_dir = config.get("template_dir")
|
||||||
|
|
||||||
|
if not template_dir:
|
||||||
|
template_dir = pkg_resources.resource_filename("synapse", "res/templates")
|
||||||
|
|
||||||
|
if "account_renewed_html_path" in config:
|
||||||
|
file_path = os.path.join(template_dir, config["account_renewed_html_path"])
|
||||||
|
|
||||||
|
self.account_renewed_html_content = self.read_file(
|
||||||
|
file_path, "account_validity.account_renewed_html_path"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.account_renewed_html_content = (
|
||||||
|
"<html><body>Your account has been successfully renewed.</body><html>"
|
||||||
|
)
|
||||||
|
|
||||||
|
if "invalid_token_html_path" in config:
|
||||||
|
file_path = os.path.join(template_dir, config["invalid_token_html_path"])
|
||||||
|
|
||||||
|
self.invalid_token_html_content = self.read_file(
|
||||||
|
file_path, "account_validity.invalid_token_html_path"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.invalid_token_html_content = (
|
||||||
|
"<html><body>Invalid renewal token.</body><html>"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class RegistrationConfig(Config):
|
class RegistrationConfig(Config):
|
||||||
@@ -71,9 +102,8 @@ class RegistrationConfig(Config):
|
|||||||
self.default_identity_server = config.get("default_identity_server")
|
self.default_identity_server = config.get("default_identity_server")
|
||||||
self.allow_guest_access = config.get("allow_guest_access", False)
|
self.allow_guest_access = config.get("allow_guest_access", False)
|
||||||
|
|
||||||
self.invite_3pid_guest = self.allow_guest_access and config.get(
|
if config.get("invite_3pid_guest", False):
|
||||||
"invite_3pid_guest", False
|
raise ConfigError("invite_3pid_guest is no longer supported")
|
||||||
)
|
|
||||||
|
|
||||||
self.auto_join_rooms = config.get("auto_join_rooms", [])
|
self.auto_join_rooms = config.get("auto_join_rooms", [])
|
||||||
for room_alias in self.auto_join_rooms:
|
for room_alias in self.auto_join_rooms:
|
||||||
@@ -85,6 +115,11 @@ class RegistrationConfig(Config):
|
|||||||
"disable_msisdn_registration", False
|
"disable_msisdn_registration", False
|
||||||
)
|
)
|
||||||
|
|
||||||
|
session_lifetime = config.get("session_lifetime")
|
||||||
|
if session_lifetime is not None:
|
||||||
|
session_lifetime = self.parse_duration(session_lifetime)
|
||||||
|
self.session_lifetime = session_lifetime
|
||||||
|
|
||||||
def generate_config_section(self, generate_secrets=False, **kwargs):
|
def generate_config_section(self, generate_secrets=False, **kwargs):
|
||||||
if generate_secrets:
|
if generate_secrets:
|
||||||
registration_shared_secret = 'registration_shared_secret: "%s"' % (
|
registration_shared_secret = 'registration_shared_secret: "%s"' % (
|
||||||
@@ -141,6 +176,27 @@ class RegistrationConfig(Config):
|
|||||||
# period: 6w
|
# period: 6w
|
||||||
# renew_at: 1w
|
# renew_at: 1w
|
||||||
# renew_email_subject: "Renew your %%(app)s account"
|
# renew_email_subject: "Renew your %%(app)s account"
|
||||||
|
# # Directory in which Synapse will try to find the HTML files to serve to the
|
||||||
|
# # user when trying to renew an account. Optional, defaults to
|
||||||
|
# # synapse/res/templates.
|
||||||
|
# template_dir: "res/templates"
|
||||||
|
# # HTML to be displayed to the user after they successfully renewed their
|
||||||
|
# # account. Optional.
|
||||||
|
# account_renewed_html_path: "account_renewed.html"
|
||||||
|
# # HTML to be displayed when the user tries to renew an account with an invalid
|
||||||
|
# # renewal token. Optional.
|
||||||
|
# invalid_token_html_path: "invalid_token.html"
|
||||||
|
|
||||||
|
# Time that a user's session remains valid for, after they log in.
|
||||||
|
#
|
||||||
|
# Note that this is not currently compatible with guest logins.
|
||||||
|
#
|
||||||
|
# Note also that this is calculated at login time: changes are not applied
|
||||||
|
# retrospectively to users who have already logged in.
|
||||||
|
#
|
||||||
|
# By default, this is infinite.
|
||||||
|
#
|
||||||
|
#session_lifetime: 24h
|
||||||
|
|
||||||
# The user must provide all of the below types of 3PID when registering.
|
# The user must provide all of the below types of 3PID when registering.
|
||||||
#
|
#
|
||||||
@@ -222,7 +278,8 @@ class RegistrationConfig(Config):
|
|||||||
% locals()
|
% locals()
|
||||||
)
|
)
|
||||||
|
|
||||||
def add_arguments(self, parser):
|
@staticmethod
|
||||||
|
def add_arguments(parser):
|
||||||
reg_group = parser.add_argument_group("registration")
|
reg_group = parser.add_argument_group("registration")
|
||||||
reg_group.add_argument(
|
reg_group.add_argument(
|
||||||
"--enable-registration",
|
"--enable-registration",
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
from collections import namedtuple
|
from collections import namedtuple
|
||||||
|
|
||||||
@@ -87,6 +88,18 @@ def parse_thumbnail_requirements(thumbnail_sizes):
|
|||||||
|
|
||||||
class ContentRepositoryConfig(Config):
|
class ContentRepositoryConfig(Config):
|
||||||
def read_config(self, config, **kwargs):
|
def read_config(self, config, **kwargs):
|
||||||
|
|
||||||
|
# Only enable the media repo if either the media repo is enabled or the
|
||||||
|
# current worker app is the media repo.
|
||||||
|
if (
|
||||||
|
self.enable_media_repo is False
|
||||||
|
and config.get("worker_app") != "synapse.app.media_repository"
|
||||||
|
):
|
||||||
|
self.can_load_media_repo = False
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
self.can_load_media_repo = True
|
||||||
|
|
||||||
self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
|
self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
|
||||||
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
|
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
|
||||||
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
|
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
|
||||||
@@ -202,6 +215,13 @@ class ContentRepositoryConfig(Config):
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
r"""
|
r"""
|
||||||
|
## Media Store ##
|
||||||
|
|
||||||
|
# Enable the media store service in the Synapse master. Uncomment the
|
||||||
|
# following if you are using a separate media store worker.
|
||||||
|
#
|
||||||
|
#enable_media_repo: false
|
||||||
|
|
||||||
# Directory where uploaded images and attachments are stored.
|
# Directory where uploaded images and attachments are stored.
|
||||||
#
|
#
|
||||||
media_store_path: "%(media_store)s"
|
media_store_path: "%(media_store)s"
|
||||||
|
|||||||
+138
-53
@@ -12,7 +12,13 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
import re
|
||||||
|
|
||||||
from synapse.python_dependencies import DependencyException, check_requirements
|
from synapse.python_dependencies import DependencyException, check_requirements
|
||||||
|
from synapse.types import (
|
||||||
|
map_username_to_mxid_localpart,
|
||||||
|
mxid_localpart_allowed_characters,
|
||||||
|
)
|
||||||
|
|
||||||
from ._base import Config, ConfigError
|
from ._base import Config, ConfigError
|
||||||
|
|
||||||
@@ -26,6 +32,9 @@ class SAML2Config(Config):
|
|||||||
if not saml2_config or not saml2_config.get("enabled", True):
|
if not saml2_config or not saml2_config.get("enabled", True):
|
||||||
return
|
return
|
||||||
|
|
||||||
|
if not saml2_config.get("sp_config") and not saml2_config.get("config_path"):
|
||||||
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
check_requirements("saml2")
|
check_requirements("saml2")
|
||||||
except DependencyException as e:
|
except DependencyException as e:
|
||||||
@@ -33,6 +42,14 @@ class SAML2Config(Config):
|
|||||||
|
|
||||||
self.saml2_enabled = True
|
self.saml2_enabled = True
|
||||||
|
|
||||||
|
self.saml2_mxid_source_attribute = saml2_config.get(
|
||||||
|
"mxid_source_attribute", "uid"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
|
||||||
|
"grandfathered_mxid_source_attribute", "uid"
|
||||||
|
)
|
||||||
|
|
||||||
import saml2.config
|
import saml2.config
|
||||||
|
|
||||||
self.saml2_sp_config = saml2.config.SPConfig()
|
self.saml2_sp_config = saml2.config.SPConfig()
|
||||||
@@ -48,6 +65,12 @@ class SAML2Config(Config):
|
|||||||
saml2_config.get("saml_session_lifetime", "5m")
|
saml2_config.get("saml_session_lifetime", "5m")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
mapping = saml2_config.get("mxid_mapping", "hexencode")
|
||||||
|
try:
|
||||||
|
self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping]
|
||||||
|
except KeyError:
|
||||||
|
raise ConfigError("%s is not a known mxid_mapping" % (mapping,))
|
||||||
|
|
||||||
def _default_saml_config_dict(self):
|
def _default_saml_config_dict(self):
|
||||||
import saml2
|
import saml2
|
||||||
|
|
||||||
@@ -55,6 +78,13 @@ class SAML2Config(Config):
|
|||||||
if public_baseurl is None:
|
if public_baseurl is None:
|
||||||
raise ConfigError("saml2_config requires a public_baseurl to be set")
|
raise ConfigError("saml2_config requires a public_baseurl to be set")
|
||||||
|
|
||||||
|
required_attributes = {"uid", self.saml2_mxid_source_attribute}
|
||||||
|
|
||||||
|
optional_attributes = {"displayName"}
|
||||||
|
if self.saml2_grandfathered_mxid_source_attribute:
|
||||||
|
optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
|
||||||
|
optional_attributes -= required_attributes
|
||||||
|
|
||||||
metadata_url = public_baseurl + "_matrix/saml2/metadata.xml"
|
metadata_url = public_baseurl + "_matrix/saml2/metadata.xml"
|
||||||
response_url = public_baseurl + "_matrix/saml2/authn_response"
|
response_url = public_baseurl + "_matrix/saml2/authn_response"
|
||||||
return {
|
return {
|
||||||
@@ -66,8 +96,9 @@ class SAML2Config(Config):
|
|||||||
(response_url, saml2.BINDING_HTTP_POST)
|
(response_url, saml2.BINDING_HTTP_POST)
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"required_attributes": ["uid"],
|
"required_attributes": list(required_attributes),
|
||||||
"optional_attributes": ["mail", "surname", "givenname"],
|
"optional_attributes": list(optional_attributes),
|
||||||
|
# "name_id_format": saml2.saml.NAMEID_FORMAT_PERSISTENT,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -76,12 +107,13 @@ class SAML2Config(Config):
|
|||||||
return """\
|
return """\
|
||||||
# Enable SAML2 for registration and login. Uses pysaml2.
|
# Enable SAML2 for registration and login. Uses pysaml2.
|
||||||
#
|
#
|
||||||
# `sp_config` is the configuration for the pysaml2 Service Provider.
|
# At least one of `sp_config` or `config_path` must be set in this section to
|
||||||
# See pysaml2 docs for format of config.
|
# enable SAML login.
|
||||||
#
|
#
|
||||||
# Default values will be used for the 'entityid' and 'service' settings,
|
# (You will probably also want to set the following options to `false` to
|
||||||
# so it is not normally necessary to specify them unless you need to
|
# disable the regular login/registration flows:
|
||||||
# override them.
|
# * enable_registration
|
||||||
|
# * password_config.enabled
|
||||||
#
|
#
|
||||||
# Once SAML support is enabled, a metadata file will be exposed at
|
# Once SAML support is enabled, a metadata file will be exposed at
|
||||||
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
|
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
|
||||||
@@ -89,52 +121,105 @@ class SAML2Config(Config):
|
|||||||
# the IdP to use an ACS location of
|
# the IdP to use an ACS location of
|
||||||
# https://<server>:<port>/_matrix/saml2/authn_response.
|
# https://<server>:<port>/_matrix/saml2/authn_response.
|
||||||
#
|
#
|
||||||
#saml2_config:
|
saml2_config:
|
||||||
# sp_config:
|
# `sp_config` is the configuration for the pysaml2 Service Provider.
|
||||||
# # point this to the IdP's metadata. You can use either a local file or
|
# See pysaml2 docs for format of config.
|
||||||
# # (preferably) a URL.
|
#
|
||||||
# metadata:
|
# Default values will be used for the 'entityid' and 'service' settings,
|
||||||
# #local: ["saml2/idp.xml"]
|
# so it is not normally necessary to specify them unless you need to
|
||||||
# remote:
|
# override them.
|
||||||
# - url: https://our_idp/metadata.xml
|
#
|
||||||
#
|
#sp_config:
|
||||||
# # By default, the user has to go to our login page first. If you'd like to
|
# # point this to the IdP's metadata. You can use either a local file or
|
||||||
# # allow IdP-initiated login, set 'allow_unsolicited: True' in a
|
# # (preferably) a URL.
|
||||||
# # 'service.sp' section:
|
# metadata:
|
||||||
# #
|
# #local: ["saml2/idp.xml"]
|
||||||
# #service:
|
# remote:
|
||||||
# # sp:
|
# - url: https://our_idp/metadata.xml
|
||||||
# # allow_unsolicited: True
|
#
|
||||||
#
|
# # By default, the user has to go to our login page first. If you'd like
|
||||||
# # The examples below are just used to generate our metadata xml, and you
|
# # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
|
||||||
# # may well not need it, depending on your setup. Alternatively you
|
# # 'service.sp' section:
|
||||||
# # may need a whole lot more detail - see the pysaml2 docs!
|
# #
|
||||||
#
|
# #service:
|
||||||
# description: ["My awesome SP", "en"]
|
# # sp:
|
||||||
# name: ["Test SP", "en"]
|
# # allow_unsolicited: true
|
||||||
#
|
#
|
||||||
# organization:
|
# # The examples below are just used to generate our metadata xml, and you
|
||||||
# name: Example com
|
# # may well not need them, depending on your setup. Alternatively you
|
||||||
# display_name:
|
# # may need a whole lot more detail - see the pysaml2 docs!
|
||||||
# - ["Example co", "en"]
|
#
|
||||||
# url: "http://example.com"
|
# description: ["My awesome SP", "en"]
|
||||||
#
|
# name: ["Test SP", "en"]
|
||||||
# contact_person:
|
#
|
||||||
# - given_name: Bob
|
# organization:
|
||||||
# sur_name: "the Sysadmin"
|
# name: Example com
|
||||||
# email_address": ["admin@example.com"]
|
# display_name:
|
||||||
# contact_type": technical
|
# - ["Example co", "en"]
|
||||||
#
|
# url: "http://example.com"
|
||||||
# # Instead of putting the config inline as above, you can specify a
|
#
|
||||||
# # separate pysaml2 configuration file:
|
# contact_person:
|
||||||
# #
|
# - given_name: Bob
|
||||||
# config_path: "%(config_dir_path)s/sp_conf.py"
|
# sur_name: "the Sysadmin"
|
||||||
#
|
# email_address": ["admin@example.com"]
|
||||||
# # the lifetime of a SAML session. This defines how long a user has to
|
# contact_type": technical
|
||||||
# # complete the authentication process, if allow_unsolicited is unset.
|
|
||||||
# # The default is 5 minutes.
|
# Instead of putting the config inline as above, you can specify a
|
||||||
# #
|
# separate pysaml2 configuration file:
|
||||||
# # saml_session_lifetime: 5m
|
#
|
||||||
|
#config_path: "%(config_dir_path)s/sp_conf.py"
|
||||||
|
|
||||||
|
# the lifetime of a SAML session. This defines how long a user has to
|
||||||
|
# complete the authentication process, if allow_unsolicited is unset.
|
||||||
|
# The default is 5 minutes.
|
||||||
|
#
|
||||||
|
#saml_session_lifetime: 5m
|
||||||
|
|
||||||
|
# The SAML attribute (after mapping via the attribute maps) to use to derive
|
||||||
|
# the Matrix ID from. 'uid' by default.
|
||||||
|
#
|
||||||
|
#mxid_source_attribute: displayName
|
||||||
|
|
||||||
|
# The mapping system to use for mapping the saml attribute onto a matrix ID.
|
||||||
|
# Options include:
|
||||||
|
# * 'hexencode' (which maps unpermitted characters to '=xx')
|
||||||
|
# * 'dotreplace' (which replaces unpermitted characters with '.').
|
||||||
|
# The default is 'hexencode'.
|
||||||
|
#
|
||||||
|
#mxid_mapping: dotreplace
|
||||||
|
|
||||||
|
# In previous versions of synapse, the mapping from SAML attribute to MXID was
|
||||||
|
# always calculated dynamically rather than stored in a table. For backwards-
|
||||||
|
# compatibility, we will look for user_ids matching such a pattern before
|
||||||
|
# creating a new account.
|
||||||
|
#
|
||||||
|
# This setting controls the SAML attribute which will be used for this
|
||||||
|
# backwards-compatibility lookup. Typically it should be 'uid', but if the
|
||||||
|
# attribute maps are changed, it may be necessary to change it.
|
||||||
|
#
|
||||||
|
# The default is 'uid'.
|
||||||
|
#
|
||||||
|
#grandfathered_mxid_source_attribute: upn
|
||||||
""" % {
|
""" % {
|
||||||
"config_dir_path": config_dir_path
|
"config_dir_path": config_dir_path
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
DOT_REPLACE_PATTERN = re.compile(
|
||||||
|
("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def dot_replace_for_mxid(username: str) -> str:
|
||||||
|
username = username.lower()
|
||||||
|
username = DOT_REPLACE_PATTERN.sub(".", username)
|
||||||
|
|
||||||
|
# regular mxids aren't allowed to start with an underscore either
|
||||||
|
username = re.sub("^_", "", username)
|
||||||
|
return username
|
||||||
|
|
||||||
|
|
||||||
|
MXID_MAPPER_MAP = {
|
||||||
|
"hexencode": map_username_to_mxid_localpart,
|
||||||
|
"dotreplace": dot_replace_for_mxid,
|
||||||
|
}
|
||||||
|
|||||||
@@ -18,6 +18,7 @@
|
|||||||
import logging
|
import logging
|
||||||
import os.path
|
import os.path
|
||||||
|
|
||||||
|
import attr
|
||||||
from netaddr import IPSet
|
from netaddr import IPSet
|
||||||
|
|
||||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
||||||
@@ -38,6 +39,12 @@ DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
|
|||||||
|
|
||||||
DEFAULT_ROOM_VERSION = "4"
|
DEFAULT_ROOM_VERSION = "4"
|
||||||
|
|
||||||
|
ROOM_COMPLEXITY_TOO_GREAT = (
|
||||||
|
"Your homeserver is unable to join rooms this large or complex. "
|
||||||
|
"Please speak to your server administrator, or upgrade your instance "
|
||||||
|
"to join this room."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class ServerConfig(Config):
|
class ServerConfig(Config):
|
||||||
def read_config(self, config, **kwargs):
|
def read_config(self, config, **kwargs):
|
||||||
@@ -136,7 +143,7 @@ class ServerConfig(Config):
|
|||||||
|
|
||||||
# Whether to enable experimental MSC1849 (aka relations) support
|
# Whether to enable experimental MSC1849 (aka relations) support
|
||||||
self.experimental_msc1849_support_enabled = config.get(
|
self.experimental_msc1849_support_enabled = config.get(
|
||||||
"experimental_msc1849_support_enabled", False
|
"experimental_msc1849_support_enabled", True
|
||||||
)
|
)
|
||||||
|
|
||||||
# Options to control access by tracking MAU
|
# Options to control access by tracking MAU
|
||||||
@@ -247,6 +254,23 @@ class ServerConfig(Config):
|
|||||||
|
|
||||||
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
||||||
|
|
||||||
|
@attr.s
|
||||||
|
class LimitRemoteRoomsConfig(object):
|
||||||
|
enabled = attr.ib(
|
||||||
|
validator=attr.validators.instance_of(bool), default=False
|
||||||
|
)
|
||||||
|
complexity = attr.ib(
|
||||||
|
validator=attr.validators.instance_of((int, float)), default=1.0
|
||||||
|
)
|
||||||
|
complexity_error = attr.ib(
|
||||||
|
validator=attr.validators.instance_of(str),
|
||||||
|
default=ROOM_COMPLEXITY_TOO_GREAT,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.limit_remote_rooms = LimitRemoteRoomsConfig(
|
||||||
|
**config.get("limit_remote_rooms", {})
|
||||||
|
)
|
||||||
|
|
||||||
bind_port = config.get("bind_port")
|
bind_port = config.get("bind_port")
|
||||||
if bind_port:
|
if bind_port:
|
||||||
if config.get("no_tls", False):
|
if config.get("no_tls", False):
|
||||||
@@ -617,6 +641,23 @@ class ServerConfig(Config):
|
|||||||
# Used by phonehome stats to group together related servers.
|
# Used by phonehome stats to group together related servers.
|
||||||
#server_context: context
|
#server_context: context
|
||||||
|
|
||||||
|
# Resource-constrained Homeserver Settings
|
||||||
|
#
|
||||||
|
# If limit_remote_rooms.enabled is True, the room complexity will be
|
||||||
|
# checked before a user joins a new remote room. If it is above
|
||||||
|
# limit_remote_rooms.complexity, it will disallow joining or
|
||||||
|
# instantly leave.
|
||||||
|
#
|
||||||
|
# limit_remote_rooms.complexity_error can be set to customise the text
|
||||||
|
# displayed to the user when a room above the complexity threshold has
|
||||||
|
# its join cancelled.
|
||||||
|
#
|
||||||
|
# Uncomment the below lines to enable:
|
||||||
|
#limit_remote_rooms:
|
||||||
|
# enabled: True
|
||||||
|
# complexity: 1.0
|
||||||
|
# complexity_error: "This room is too complex."
|
||||||
|
|
||||||
# Whether to require a user to be in the room to add an alias to it.
|
# Whether to require a user to be in the room to add an alias to it.
|
||||||
# Defaults to 'true'.
|
# Defaults to 'true'.
|
||||||
#
|
#
|
||||||
@@ -639,7 +680,8 @@ class ServerConfig(Config):
|
|||||||
if args.print_pidfile is not None:
|
if args.print_pidfile is not None:
|
||||||
self.print_pidfile = args.print_pidfile
|
self.print_pidfile = args.print_pidfile
|
||||||
|
|
||||||
def add_arguments(self, parser):
|
@staticmethod
|
||||||
|
def add_arguments(parser):
|
||||||
server_group = parser.add_argument_group("server")
|
server_group = parser.add_argument_group("server")
|
||||||
server_group.add_argument(
|
server_group.add_argument(
|
||||||
"-D",
|
"-D",
|
||||||
|
|||||||
@@ -0,0 +1,81 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2019 The Matrix.org Foundation C.I.C.d
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from ._base import Config, ConfigError
|
||||||
|
|
||||||
|
|
||||||
|
class TracerConfig(Config):
|
||||||
|
def read_config(self, config, **kwargs):
|
||||||
|
opentracing_config = config.get("opentracing")
|
||||||
|
if opentracing_config is None:
|
||||||
|
opentracing_config = {}
|
||||||
|
|
||||||
|
self.opentracer_enabled = opentracing_config.get("enabled", False)
|
||||||
|
|
||||||
|
self.jaeger_config = opentracing_config.get(
|
||||||
|
"jaeger_config",
|
||||||
|
{"sampler": {"type": "const", "param": 1}, "logging": False},
|
||||||
|
)
|
||||||
|
|
||||||
|
if not self.opentracer_enabled:
|
||||||
|
return
|
||||||
|
|
||||||
|
# The tracer is enabled so sanitize the config
|
||||||
|
|
||||||
|
self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
|
||||||
|
if not isinstance(self.opentracer_whitelist, list):
|
||||||
|
raise ConfigError("Tracer homeserver_whitelist config is malformed")
|
||||||
|
|
||||||
|
def generate_config_section(cls, **kwargs):
|
||||||
|
return """\
|
||||||
|
## Opentracing ##
|
||||||
|
|
||||||
|
# These settings enable opentracing, which implements distributed tracing.
|
||||||
|
# This allows you to observe the causal chains of events across servers
|
||||||
|
# including requests, key lookups etc., across any server running
|
||||||
|
# synapse or any other other services which supports opentracing
|
||||||
|
# (specifically those implemented with Jaeger).
|
||||||
|
#
|
||||||
|
opentracing:
|
||||||
|
# tracing is disabled by default. Uncomment the following line to enable it.
|
||||||
|
#
|
||||||
|
#enabled: true
|
||||||
|
|
||||||
|
# The list of homeservers we wish to send and receive span contexts and span baggage.
|
||||||
|
# See docs/opentracing.rst
|
||||||
|
# This is a list of regexes which are matched against the server_name of the
|
||||||
|
# homeserver.
|
||||||
|
#
|
||||||
|
# By defult, it is empty, so no servers are matched.
|
||||||
|
#
|
||||||
|
#homeserver_whitelist:
|
||||||
|
# - ".*"
|
||||||
|
|
||||||
|
# Jaeger can be configured to sample traces at different rates.
|
||||||
|
# All configuration options provided by Jaeger can be set here.
|
||||||
|
# Jaeger's configuration mostly related to trace sampling which
|
||||||
|
# is documented here:
|
||||||
|
# https://www.jaegertracing.io/docs/1.13/sampling/.
|
||||||
|
#
|
||||||
|
#jaeger_config:
|
||||||
|
# sampler:
|
||||||
|
# type: const
|
||||||
|
# param: 1
|
||||||
|
|
||||||
|
# Logging whether spans were started and reported
|
||||||
|
#
|
||||||
|
# logging:
|
||||||
|
# false
|
||||||
|
"""
|
||||||
@@ -31,7 +31,6 @@ class WorkerConfig(Config):
|
|||||||
self.worker_listeners = config.get("worker_listeners", [])
|
self.worker_listeners = config.get("worker_listeners", [])
|
||||||
self.worker_daemonize = config.get("worker_daemonize")
|
self.worker_daemonize = config.get("worker_daemonize")
|
||||||
self.worker_pid_file = config.get("worker_pid_file")
|
self.worker_pid_file = config.get("worker_pid_file")
|
||||||
self.worker_log_file = config.get("worker_log_file")
|
|
||||||
self.worker_log_config = config.get("worker_log_config")
|
self.worker_log_config = config.get("worker_log_config")
|
||||||
|
|
||||||
# The host used to connect to the main synapse
|
# The host used to connect to the main synapse
|
||||||
@@ -78,9 +77,5 @@ class WorkerConfig(Config):
|
|||||||
|
|
||||||
if args.daemonize is not None:
|
if args.daemonize is not None:
|
||||||
self.worker_daemonize = args.daemonize
|
self.worker_daemonize = args.daemonize
|
||||||
if args.log_config is not None:
|
|
||||||
self.worker_log_config = args.log_config
|
|
||||||
if args.log_file is not None:
|
|
||||||
self.worker_log_file = args.log_file
|
|
||||||
if args.manhole is not None:
|
if args.manhole is not None:
|
||||||
self.worker_manhole = args.worker_manhole
|
self.worker_manhole = args.worker_manhole
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ from twisted.internet.ssl import (
|
|||||||
platformTrust,
|
platformTrust,
|
||||||
)
|
)
|
||||||
from twisted.python.failure import Failure
|
from twisted.python.failure import Failure
|
||||||
|
from twisted.web.iweb import IPolicyForHTTPS
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -74,6 +75,7 @@ class ServerContextFactory(ContextFactory):
|
|||||||
return self._context
|
return self._context
|
||||||
|
|
||||||
|
|
||||||
|
@implementer(IPolicyForHTTPS)
|
||||||
class ClientTLSOptionsFactory(object):
|
class ClientTLSOptionsFactory(object):
|
||||||
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
|
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
|
||||||
to remote servers for federation.
|
to remote servers for federation.
|
||||||
@@ -146,6 +148,12 @@ class ClientTLSOptionsFactory(object):
|
|||||||
f = Failure()
|
f = Failure()
|
||||||
tls_protocol.failVerification(f)
|
tls_protocol.failVerification(f)
|
||||||
|
|
||||||
|
def creatorForNetloc(self, hostname, port):
|
||||||
|
"""Implements the IPolicyForHTTPS interace so that this can be passed
|
||||||
|
directly to agents.
|
||||||
|
"""
|
||||||
|
return self.get_options(hostname)
|
||||||
|
|
||||||
|
|
||||||
@implementer(IOpenSSLClientConnectionCreator)
|
@implementer(IOpenSSLClientConnectionCreator)
|
||||||
class SSLClientConnectionCreator(object):
|
class SSLClientConnectionCreator(object):
|
||||||
|
|||||||
+55
-64
@@ -44,15 +44,16 @@ from synapse.api.errors import (
|
|||||||
RequestSendFailed,
|
RequestSendFailed,
|
||||||
SynapseError,
|
SynapseError,
|
||||||
)
|
)
|
||||||
from synapse.storage.keys import FetchKeyResult
|
from synapse.logging.context import (
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
|
||||||
from synapse.util.async_helpers import yieldable_gather_results
|
|
||||||
from synapse.util.logcontext import (
|
|
||||||
LoggingContext,
|
LoggingContext,
|
||||||
PreserveLoggingContext,
|
PreserveLoggingContext,
|
||||||
|
make_deferred_yieldable,
|
||||||
preserve_fn,
|
preserve_fn,
|
||||||
run_in_background,
|
run_in_background,
|
||||||
)
|
)
|
||||||
|
from synapse.storage.keys import FetchKeyResult
|
||||||
|
from synapse.util import unwrapFirstError
|
||||||
|
from synapse.util.async_helpers import yieldable_gather_results
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
|
||||||
@@ -140,7 +141,7 @@ class Keyring(object):
|
|||||||
"""
|
"""
|
||||||
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
|
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
|
||||||
requests = (req,)
|
requests = (req,)
|
||||||
return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
|
return make_deferred_yieldable(self._verify_objects(requests)[0])
|
||||||
|
|
||||||
def verify_json_objects_for_server(self, server_and_json):
|
def verify_json_objects_for_server(self, server_and_json):
|
||||||
"""Bulk verifies signatures of json objects, bulk fetching keys as
|
"""Bulk verifies signatures of json objects, bulk fetching keys as
|
||||||
@@ -237,27 +238,9 @@ class Keyring(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# create a deferred for each server we're going to look up the keys
|
ctx = LoggingContext.current_context()
|
||||||
# for; we'll resolve them once we have completed our lookups.
|
|
||||||
# These will be passed into wait_for_previous_lookups to block
|
|
||||||
# any other lookups until we have finished.
|
|
||||||
# The deferreds are called with no logcontext.
|
|
||||||
server_to_deferred = {
|
|
||||||
rq.server_name: defer.Deferred() for rq in verify_requests
|
|
||||||
}
|
|
||||||
|
|
||||||
# We want to wait for any previous lookups to complete before
|
# map from server name to a set of outstanding request ids
|
||||||
# proceeding.
|
|
||||||
yield self.wait_for_previous_lookups(server_to_deferred)
|
|
||||||
|
|
||||||
# Actually start fetching keys.
|
|
||||||
self._get_server_verify_keys(verify_requests)
|
|
||||||
|
|
||||||
# When we've finished fetching all the keys for a given server_name,
|
|
||||||
# resolve the deferred passed to `wait_for_previous_lookups` so that
|
|
||||||
# any lookups waiting will proceed.
|
|
||||||
#
|
|
||||||
# map from server name to a set of request ids
|
|
||||||
server_to_request_ids = {}
|
server_to_request_ids = {}
|
||||||
|
|
||||||
for verify_request in verify_requests:
|
for verify_request in verify_requests:
|
||||||
@@ -265,40 +248,61 @@ class Keyring(object):
|
|||||||
request_id = id(verify_request)
|
request_id = id(verify_request)
|
||||||
server_to_request_ids.setdefault(server_name, set()).add(request_id)
|
server_to_request_ids.setdefault(server_name, set()).add(request_id)
|
||||||
|
|
||||||
def remove_deferreds(res, verify_request):
|
# Wait for any previous lookups to complete before proceeding.
|
||||||
|
yield self.wait_for_previous_lookups(server_to_request_ids.keys())
|
||||||
|
|
||||||
|
# take out a lock on each of the servers by sticking a Deferred in
|
||||||
|
# key_downloads
|
||||||
|
for server_name in server_to_request_ids.keys():
|
||||||
|
self.key_downloads[server_name] = defer.Deferred()
|
||||||
|
logger.debug("Got key lookup lock on %s", server_name)
|
||||||
|
|
||||||
|
# When we've finished fetching all the keys for a given server_name,
|
||||||
|
# drop the lock by resolving the deferred in key_downloads.
|
||||||
|
def drop_server_lock(server_name):
|
||||||
|
d = self.key_downloads.pop(server_name)
|
||||||
|
d.callback(None)
|
||||||
|
|
||||||
|
def lookup_done(res, verify_request):
|
||||||
server_name = verify_request.server_name
|
server_name = verify_request.server_name
|
||||||
request_id = id(verify_request)
|
server_requests = server_to_request_ids[server_name]
|
||||||
server_to_request_ids[server_name].discard(request_id)
|
server_requests.remove(id(verify_request))
|
||||||
if not server_to_request_ids[server_name]:
|
|
||||||
d = server_to_deferred.pop(server_name, None)
|
# if there are no more requests for this server, we can drop the lock.
|
||||||
if d:
|
if not server_requests:
|
||||||
d.callback(None)
|
with PreserveLoggingContext(ctx):
|
||||||
|
logger.debug("Releasing key lookup lock on %s", server_name)
|
||||||
|
|
||||||
|
# ... but not immediately, as that can cause stack explosions if
|
||||||
|
# we get a long queue of lookups.
|
||||||
|
self.clock.call_later(0, drop_server_lock, server_name)
|
||||||
|
|
||||||
return res
|
return res
|
||||||
|
|
||||||
for verify_request in verify_requests:
|
for verify_request in verify_requests:
|
||||||
verify_request.key_ready.addBoth(remove_deferreds, verify_request)
|
verify_request.key_ready.addBoth(lookup_done, verify_request)
|
||||||
|
|
||||||
|
# Actually start fetching keys.
|
||||||
|
self._get_server_verify_keys(verify_requests)
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Error starting key lookups")
|
logger.exception("Error starting key lookups")
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def wait_for_previous_lookups(self, server_to_deferred):
|
def wait_for_previous_lookups(self, server_names):
|
||||||
"""Waits for any previous key lookups for the given servers to finish.
|
"""Waits for any previous key lookups for the given servers to finish.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
server_to_deferred (dict[str, Deferred]): server_name to deferred which gets
|
server_names (Iterable[str]): list of servers which we want to look up
|
||||||
resolved once we've finished looking up keys for that server.
|
|
||||||
The Deferreds should be regular twisted ones which call their
|
|
||||||
callbacks with no logcontext.
|
|
||||||
|
|
||||||
Returns: a Deferred which resolves once all key lookups for the given
|
Returns:
|
||||||
servers have completed. Follows the synapse rules of logcontext
|
Deferred[None]: resolves once all key lookups for the given servers have
|
||||||
preservation.
|
completed. Follows the synapse rules of logcontext preservation.
|
||||||
"""
|
"""
|
||||||
loop_count = 1
|
loop_count = 1
|
||||||
while True:
|
while True:
|
||||||
wait_on = [
|
wait_on = [
|
||||||
(server_name, self.key_downloads[server_name])
|
(server_name, self.key_downloads[server_name])
|
||||||
for server_name in server_to_deferred.keys()
|
for server_name in server_names
|
||||||
if server_name in self.key_downloads
|
if server_name in self.key_downloads
|
||||||
]
|
]
|
||||||
if not wait_on:
|
if not wait_on:
|
||||||
@@ -313,19 +317,6 @@ class Keyring(object):
|
|||||||
|
|
||||||
loop_count += 1
|
loop_count += 1
|
||||||
|
|
||||||
ctx = LoggingContext.current_context()
|
|
||||||
|
|
||||||
def rm(r, server_name_):
|
|
||||||
with PreserveLoggingContext(ctx):
|
|
||||||
logger.debug("Releasing key lookup lock on %s", server_name_)
|
|
||||||
self.key_downloads.pop(server_name_, None)
|
|
||||||
return r
|
|
||||||
|
|
||||||
for server_name, deferred in server_to_deferred.items():
|
|
||||||
logger.debug("Got key lookup lock on %s", server_name)
|
|
||||||
self.key_downloads[server_name] = deferred
|
|
||||||
deferred.addBoth(rm, server_name)
|
|
||||||
|
|
||||||
def _get_server_verify_keys(self, verify_requests):
|
def _get_server_verify_keys(self, verify_requests):
|
||||||
"""Tries to find at least one key for each verify request
|
"""Tries to find at least one key for each verify request
|
||||||
|
|
||||||
@@ -471,7 +462,7 @@ class StoreKeyFetcher(KeyFetcher):
|
|||||||
keys = {}
|
keys = {}
|
||||||
for (server_name, key_id), key in res.items():
|
for (server_name, key_id), key in res.items():
|
||||||
keys.setdefault(server_name, {})[key_id] = key
|
keys.setdefault(server_name, {})[key_id] = key
|
||||||
defer.returnValue(keys)
|
return keys
|
||||||
|
|
||||||
|
|
||||||
class BaseV2KeyFetcher(object):
|
class BaseV2KeyFetcher(object):
|
||||||
@@ -557,7 +548,7 @@ class BaseV2KeyFetcher(object):
|
|||||||
|
|
||||||
signed_key_json_bytes = encode_canonical_json(signed_key_json)
|
signed_key_json_bytes = encode_canonical_json(signed_key_json)
|
||||||
|
|
||||||
yield logcontext.make_deferred_yieldable(
|
yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[
|
||||||
run_in_background(
|
run_in_background(
|
||||||
@@ -575,7 +566,7 @@ class BaseV2KeyFetcher(object):
|
|||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError)
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(verify_keys)
|
return verify_keys
|
||||||
|
|
||||||
|
|
||||||
class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
||||||
@@ -597,7 +588,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
|||||||
result = yield self.get_server_verify_key_v2_indirect(
|
result = yield self.get_server_verify_key_v2_indirect(
|
||||||
keys_to_fetch, key_server
|
keys_to_fetch, key_server
|
||||||
)
|
)
|
||||||
defer.returnValue(result)
|
return result
|
||||||
except KeyLookupError as e:
|
except KeyLookupError as e:
|
||||||
logger.warning(
|
logger.warning(
|
||||||
"Key lookup failed from %r: %s", key_server.server_name, e
|
"Key lookup failed from %r: %s", key_server.server_name, e
|
||||||
@@ -610,9 +601,9 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
|||||||
str(e),
|
str(e),
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
results = yield logcontext.make_deferred_yieldable(
|
results = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[run_in_background(get_key, server) for server in self.key_servers],
|
[run_in_background(get_key, server) for server in self.key_servers],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
@@ -624,7 +615,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
|||||||
for server_name, keys in result.items():
|
for server_name, keys in result.items():
|
||||||
union_of_keys.setdefault(server_name, {}).update(keys)
|
union_of_keys.setdefault(server_name, {}).update(keys)
|
||||||
|
|
||||||
defer.returnValue(union_of_keys)
|
return union_of_keys
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
|
def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
|
||||||
@@ -710,7 +701,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
|||||||
perspective_name, time_now_ms, added_keys
|
perspective_name, time_now_ms, added_keys
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(keys)
|
return keys
|
||||||
|
|
||||||
def _validate_perspectives_response(self, key_server, response):
|
def _validate_perspectives_response(self, key_server, response):
|
||||||
"""Optionally check the signature on the result of a /key/query request
|
"""Optionally check the signature on the result of a /key/query request
|
||||||
@@ -852,7 +843,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
|
|||||||
)
|
)
|
||||||
keys.update(response_keys)
|
keys.update(response_keys)
|
||||||
|
|
||||||
defer.returnValue(keys)
|
return keys
|
||||||
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
|
|||||||
@@ -104,6 +104,17 @@ class _EventInternalMetadata(object):
|
|||||||
"""
|
"""
|
||||||
return getattr(self, "proactively_send", True)
|
return getattr(self, "proactively_send", True)
|
||||||
|
|
||||||
|
def is_redacted(self):
|
||||||
|
"""Whether the event has been redacted.
|
||||||
|
|
||||||
|
This is used for efficiently checking whether an event has been
|
||||||
|
marked as redacted without needing to make another database call.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool
|
||||||
|
"""
|
||||||
|
return getattr(self, "redacted", False)
|
||||||
|
|
||||||
|
|
||||||
def _event_dict_property(key):
|
def _event_dict_property(key):
|
||||||
# We want to be able to use hasattr with the event dict properties.
|
# We want to be able to use hasattr with the event dict properties.
|
||||||
|
|||||||
@@ -144,15 +144,13 @@ class EventBuilder(object):
|
|||||||
if self._origin_server_ts is not None:
|
if self._origin_server_ts is not None:
|
||||||
event_dict["origin_server_ts"] = self._origin_server_ts
|
event_dict["origin_server_ts"] = self._origin_server_ts
|
||||||
|
|
||||||
defer.returnValue(
|
return create_local_event_from_event_dict(
|
||||||
create_local_event_from_event_dict(
|
clock=self._clock,
|
||||||
clock=self._clock,
|
hostname=self._hostname,
|
||||||
hostname=self._hostname,
|
signing_key=self._signing_key,
|
||||||
signing_key=self._signing_key,
|
format_version=self.format_version,
|
||||||
format_version=self.format_version,
|
event_dict=event_dict,
|
||||||
event_dict=event_dict,
|
internal_metadata_dict=self.internal_metadata.get_dict(),
|
||||||
internal_metadata_dict=self.internal_metadata.get_dict(),
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
+14
-16
@@ -19,7 +19,7 @@ from frozendict import frozendict
|
|||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
|
|
||||||
|
|
||||||
class EventContext(object):
|
class EventContext(object):
|
||||||
@@ -133,19 +133,17 @@ class EventContext(object):
|
|||||||
else:
|
else:
|
||||||
prev_state_id = None
|
prev_state_id = None
|
||||||
|
|
||||||
defer.returnValue(
|
return {
|
||||||
{
|
"prev_state_id": prev_state_id,
|
||||||
"prev_state_id": prev_state_id,
|
"event_type": event.type,
|
||||||
"event_type": event.type,
|
"event_state_key": event.state_key if event.is_state() else None,
|
||||||
"event_state_key": event.state_key if event.is_state() else None,
|
"state_group": self.state_group,
|
||||||
"state_group": self.state_group,
|
"rejected": self.rejected,
|
||||||
"rejected": self.rejected,
|
"prev_group": self.prev_group,
|
||||||
"prev_group": self.prev_group,
|
"delta_ids": _encode_state_dict(self.delta_ids),
|
||||||
"delta_ids": _encode_state_dict(self.delta_ids),
|
"prev_state_events": self.prev_state_events,
|
||||||
"prev_state_events": self.prev_state_events,
|
"app_service_id": self.app_service.id if self.app_service else None,
|
||||||
"app_service_id": self.app_service.id if self.app_service else None,
|
}
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def deserialize(store, input):
|
def deserialize(store, input):
|
||||||
@@ -202,7 +200,7 @@ class EventContext(object):
|
|||||||
|
|
||||||
yield make_deferred_yieldable(self._fetching_state_deferred)
|
yield make_deferred_yieldable(self._fetching_state_deferred)
|
||||||
|
|
||||||
defer.returnValue(self._current_state_ids)
|
return self._current_state_ids
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_prev_state_ids(self, store):
|
def get_prev_state_ids(self, store):
|
||||||
@@ -222,7 +220,7 @@ class EventContext(object):
|
|||||||
|
|
||||||
yield make_deferred_yieldable(self._fetching_state_deferred)
|
yield make_deferred_yieldable(self._fetching_state_deferred)
|
||||||
|
|
||||||
defer.returnValue(self._prev_state_ids)
|
return self._prev_state_ids
|
||||||
|
|
||||||
def get_cached_current_state_ids(self):
|
def get_cached_current_state_ids(self):
|
||||||
"""Gets the current state IDs if we have them already cached.
|
"""Gets the current state IDs if we have them already cached.
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ class ThirdPartyEventRules(object):
|
|||||||
defer.Deferred[bool]: True if the event should be allowed, False if not.
|
defer.Deferred[bool]: True if the event should be allowed, False if not.
|
||||||
"""
|
"""
|
||||||
if self.third_party_rules is None:
|
if self.third_party_rules is None:
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
prev_state_ids = yield context.get_prev_state_ids(self.store)
|
prev_state_ids = yield context.get_prev_state_ids(self.store)
|
||||||
|
|
||||||
@@ -61,7 +61,7 @@ class ThirdPartyEventRules(object):
|
|||||||
state_events[key] = yield self.store.get_event(event_id, allow_none=True)
|
state_events[key] = yield self.store.get_event(event_id, allow_none=True)
|
||||||
|
|
||||||
ret = yield self.third_party_rules.check_event_allowed(event, state_events)
|
ret = yield self.third_party_rules.check_event_allowed(event, state_events)
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_create_room(self, requester, config, is_requester_admin):
|
def on_create_room(self, requester, config, is_requester_admin):
|
||||||
@@ -98,7 +98,7 @@ class ThirdPartyEventRules(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
if self.third_party_rules is None:
|
if self.third_party_rules is None:
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|
||||||
state_ids = yield self.store.get_filtered_current_state_ids(room_id)
|
state_ids = yield self.store.get_filtered_current_state_ids(room_id)
|
||||||
room_state_events = yield self.store.get_events(state_ids.values())
|
room_state_events = yield self.store.get_events(state_ids.values())
|
||||||
@@ -110,4 +110,4 @@ class ThirdPartyEventRules(object):
|
|||||||
ret = yield self.third_party_rules.check_threepid_can_be_invited(
|
ret = yield self.third_party_rules.check_threepid_can_be_invited(
|
||||||
medium, address, state_events
|
medium, address, state_events
|
||||||
)
|
)
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
|
|||||||
+19
-7
@@ -52,10 +52,15 @@ def prune_event(event):
|
|||||||
|
|
||||||
from . import event_type_from_format_version
|
from . import event_type_from_format_version
|
||||||
|
|
||||||
return event_type_from_format_version(event.format_version)(
|
pruned_event = event_type_from_format_version(event.format_version)(
|
||||||
pruned_event_dict, event.internal_metadata.get_dict()
|
pruned_event_dict, event.internal_metadata.get_dict()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Mark the event as redacted
|
||||||
|
pruned_event.internal_metadata.redacted = True
|
||||||
|
|
||||||
|
return pruned_event
|
||||||
|
|
||||||
|
|
||||||
def prune_event_dict(event_dict):
|
def prune_event_dict(event_dict):
|
||||||
"""Redacts the event_dict in the same way as `prune_event`, except it
|
"""Redacts the event_dict in the same way as `prune_event`, except it
|
||||||
@@ -355,14 +360,17 @@ class EventClientSerializer(object):
|
|||||||
"""
|
"""
|
||||||
# To handle the case of presence events and the like
|
# To handle the case of presence events and the like
|
||||||
if not isinstance(event, EventBase):
|
if not isinstance(event, EventBase):
|
||||||
defer.returnValue(event)
|
return event
|
||||||
|
|
||||||
event_id = event.event_id
|
event_id = event.event_id
|
||||||
serialized_event = serialize_event(event, time_now, **kwargs)
|
serialized_event = serialize_event(event, time_now, **kwargs)
|
||||||
|
|
||||||
# If MSC1849 is enabled then we need to look if thre are any relations
|
# If MSC1849 is enabled then we need to look if there are any relations
|
||||||
# we need to bundle in with the event
|
# we need to bundle in with the event.
|
||||||
if self.experimental_msc1849_support_enabled and bundle_aggregations:
|
# Do not bundle relations if the event has been redacted
|
||||||
|
if not event.internal_metadata.is_redacted() and (
|
||||||
|
self.experimental_msc1849_support_enabled and bundle_aggregations
|
||||||
|
):
|
||||||
annotations = yield self.store.get_aggregation_groups_for_event(event_id)
|
annotations = yield self.store.get_aggregation_groups_for_event(event_id)
|
||||||
references = yield self.store.get_relations_for_event(
|
references = yield self.store.get_relations_for_event(
|
||||||
event_id, RelationTypes.REFERENCE, direction="f"
|
event_id, RelationTypes.REFERENCE, direction="f"
|
||||||
@@ -392,9 +400,13 @@ class EventClientSerializer(object):
|
|||||||
serialized_event["content"].pop("m.relates_to", None)
|
serialized_event["content"].pop("m.relates_to", None)
|
||||||
|
|
||||||
r = serialized_event["unsigned"].setdefault("m.relations", {})
|
r = serialized_event["unsigned"].setdefault("m.relations", {})
|
||||||
r[RelationTypes.REPLACE] = {"event_id": edit.event_id}
|
r[RelationTypes.REPLACE] = {
|
||||||
|
"event_id": edit.event_id,
|
||||||
|
"origin_server_ts": edit.origin_server_ts,
|
||||||
|
"sender": edit.sender,
|
||||||
|
}
|
||||||
|
|
||||||
defer.returnValue(serialized_event)
|
return serialized_event
|
||||||
|
|
||||||
def serialize_events(self, events, time_now, **kwargs):
|
def serialize_events(self, events, time_now, **kwargs):
|
||||||
"""Serializes multiple events.
|
"""Serializes multiple events.
|
||||||
|
|||||||
@@ -95,10 +95,10 @@ class EventValidator(object):
|
|||||||
|
|
||||||
elif event.type == EventTypes.Topic:
|
elif event.type == EventTypes.Topic:
|
||||||
self._ensure_strings(event.content, ["topic"])
|
self._ensure_strings(event.content, ["topic"])
|
||||||
|
self._ensure_state_event(event)
|
||||||
elif event.type == EventTypes.Name:
|
elif event.type == EventTypes.Name:
|
||||||
self._ensure_strings(event.content, ["name"])
|
self._ensure_strings(event.content, ["name"])
|
||||||
|
self._ensure_state_event(event)
|
||||||
elif event.type == EventTypes.Member:
|
elif event.type == EventTypes.Member:
|
||||||
if "membership" not in event.content:
|
if "membership" not in event.content:
|
||||||
raise SynapseError(400, "Content has not membership key")
|
raise SynapseError(400, "Content has not membership key")
|
||||||
@@ -106,9 +106,25 @@ class EventValidator(object):
|
|||||||
if event.content["membership"] not in Membership.LIST:
|
if event.content["membership"] not in Membership.LIST:
|
||||||
raise SynapseError(400, "Invalid membership key")
|
raise SynapseError(400, "Invalid membership key")
|
||||||
|
|
||||||
|
self._ensure_state_event(event)
|
||||||
|
elif event.type == EventTypes.Tombstone:
|
||||||
|
if "replacement_room" not in event.content:
|
||||||
|
raise SynapseError(400, "Content has no replacement_room key")
|
||||||
|
|
||||||
|
if event.content["replacement_room"] == event.room_id:
|
||||||
|
raise SynapseError(
|
||||||
|
400, "Tombstone cannot reference the room it was sent in"
|
||||||
|
)
|
||||||
|
|
||||||
|
self._ensure_state_event(event)
|
||||||
|
|
||||||
def _ensure_strings(self, d, keys):
|
def _ensure_strings(self, d, keys):
|
||||||
for s in keys:
|
for s in keys:
|
||||||
if s not in d:
|
if s not in d:
|
||||||
raise SynapseError(400, "'%s' not in content" % (s,))
|
raise SynapseError(400, "'%s' not in content" % (s,))
|
||||||
if not isinstance(d[s], string_types):
|
if not isinstance(d[s], string_types):
|
||||||
raise SynapseError(400, "'%s' not a string type" % (s,))
|
raise SynapseError(400, "'%s' not a string type" % (s,))
|
||||||
|
|
||||||
|
def _ensure_state_event(self, event):
|
||||||
|
if not event.is_state():
|
||||||
|
raise SynapseError(400, "'%s' must be state events" % (event.type,))
|
||||||
|
|||||||
@@ -27,8 +27,14 @@ from synapse.crypto.event_signing import check_event_content_hash
|
|||||||
from synapse.events import event_type_from_format_version
|
from synapse.events import event_type_from_format_version
|
||||||
from synapse.events.utils import prune_event
|
from synapse.events.utils import prune_event
|
||||||
from synapse.http.servlet import assert_params_in_dict
|
from synapse.http.servlet import assert_params_in_dict
|
||||||
|
from synapse.logging.context import (
|
||||||
|
LoggingContext,
|
||||||
|
PreserveLoggingContext,
|
||||||
|
make_deferred_yieldable,
|
||||||
|
preserve_fn,
|
||||||
|
)
|
||||||
from synapse.types import get_domain_from_id
|
from synapse.types import get_domain_from_id
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
from synapse.util import unwrapFirstError
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -73,7 +79,7 @@ class FederationBase(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def handle_check_result(pdu, deferred):
|
def handle_check_result(pdu, deferred):
|
||||||
try:
|
try:
|
||||||
res = yield logcontext.make_deferred_yieldable(deferred)
|
res = yield make_deferred_yieldable(deferred)
|
||||||
except SynapseError:
|
except SynapseError:
|
||||||
res = None
|
res = None
|
||||||
|
|
||||||
@@ -100,22 +106,22 @@ class FederationBase(object):
|
|||||||
"Failed to find copy of %s with valid signature", pdu.event_id
|
"Failed to find copy of %s with valid signature", pdu.event_id
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(res)
|
return res
|
||||||
|
|
||||||
handle = logcontext.preserve_fn(handle_check_result)
|
handle = preserve_fn(handle_check_result)
|
||||||
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
|
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
|
||||||
|
|
||||||
valid_pdus = yield logcontext.make_deferred_yieldable(
|
valid_pdus = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(deferreds2, consumeErrors=True)
|
defer.gatherResults(deferreds2, consumeErrors=True)
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError)
|
||||||
|
|
||||||
if include_none:
|
if include_none:
|
||||||
defer.returnValue(valid_pdus)
|
return valid_pdus
|
||||||
else:
|
else:
|
||||||
defer.returnValue([p for p in valid_pdus if p])
|
return [p for p in valid_pdus if p]
|
||||||
|
|
||||||
def _check_sigs_and_hash(self, room_version, pdu):
|
def _check_sigs_and_hash(self, room_version, pdu):
|
||||||
return logcontext.make_deferred_yieldable(
|
return make_deferred_yieldable(
|
||||||
self._check_sigs_and_hashes(room_version, [pdu])[0]
|
self._check_sigs_and_hashes(room_version, [pdu])[0]
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -133,14 +139,14 @@ class FederationBase(object):
|
|||||||
* returns a redacted version of the event (if the signature
|
* returns a redacted version of the event (if the signature
|
||||||
matched but the hash did not)
|
matched but the hash did not)
|
||||||
* throws a SynapseError if the signature check failed.
|
* throws a SynapseError if the signature check failed.
|
||||||
The deferreds run their callbacks in the sentinel logcontext.
|
The deferreds run their callbacks in the sentinel
|
||||||
"""
|
"""
|
||||||
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
|
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
|
||||||
|
|
||||||
ctx = logcontext.LoggingContext.current_context()
|
ctx = LoggingContext.current_context()
|
||||||
|
|
||||||
def callback(_, pdu):
|
def callback(_, pdu):
|
||||||
with logcontext.PreserveLoggingContext(ctx):
|
with PreserveLoggingContext(ctx):
|
||||||
if not check_event_content_hash(pdu):
|
if not check_event_content_hash(pdu):
|
||||||
# let's try to distinguish between failures because the event was
|
# let's try to distinguish between failures because the event was
|
||||||
# redacted (which are somewhat expected) vs actual ball-tampering
|
# redacted (which are somewhat expected) vs actual ball-tampering
|
||||||
@@ -178,7 +184,7 @@ class FederationBase(object):
|
|||||||
|
|
||||||
def errback(failure, pdu):
|
def errback(failure, pdu):
|
||||||
failure.trap(SynapseError)
|
failure.trap(SynapseError)
|
||||||
with logcontext.PreserveLoggingContext(ctx):
|
with PreserveLoggingContext(ctx):
|
||||||
logger.warn(
|
logger.warn(
|
||||||
"Signature check failed for %s: %s",
|
"Signature check failed for %s: %s",
|
||||||
pdu.event_id,
|
pdu.event_id,
|
||||||
|
|||||||
@@ -39,10 +39,10 @@ from synapse.api.room_versions import (
|
|||||||
)
|
)
|
||||||
from synapse.events import builder, room_version_to_event_format
|
from synapse.events import builder, room_version_to_event_format
|
||||||
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
|
from synapse.util import unwrapFirstError
|
||||||
from synapse.util.caches.expiringcache import ExpiringCache
|
from synapse.util.caches.expiringcache import ExpiringCache
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -207,13 +207,13 @@ class FederationClient(FederationBase):
|
|||||||
]
|
]
|
||||||
|
|
||||||
# FIXME: We should handle signature failures more gracefully.
|
# FIXME: We should handle signature failures more gracefully.
|
||||||
pdus[:] = yield logcontext.make_deferred_yieldable(
|
pdus[:] = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
|
self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError)
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(pdus)
|
return pdus
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -245,7 +245,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
ev = self._get_pdu_cache.get(event_id)
|
ev = self._get_pdu_cache.get(event_id)
|
||||||
if ev:
|
if ev:
|
||||||
defer.returnValue(ev)
|
return ev
|
||||||
|
|
||||||
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
|
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
|
||||||
|
|
||||||
@@ -307,7 +307,7 @@ class FederationClient(FederationBase):
|
|||||||
if signed_pdu:
|
if signed_pdu:
|
||||||
self._get_pdu_cache[event_id] = signed_pdu
|
self._get_pdu_cache[event_id] = signed_pdu
|
||||||
|
|
||||||
defer.returnValue(signed_pdu)
|
return signed_pdu
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -355,7 +355,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
auth_chain.sort(key=lambda e: e.depth)
|
auth_chain.sort(key=lambda e: e.depth)
|
||||||
|
|
||||||
defer.returnValue((pdus, auth_chain))
|
return (pdus, auth_chain)
|
||||||
except HttpResponseException as e:
|
except HttpResponseException as e:
|
||||||
if e.code == 400 or e.code == 404:
|
if e.code == 400 or e.code == 404:
|
||||||
logger.info("Failed to use get_room_state_ids API, falling back")
|
logger.info("Failed to use get_room_state_ids API, falling back")
|
||||||
@@ -404,7 +404,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
signed_auth.sort(key=lambda e: e.depth)
|
signed_auth.sort(key=lambda e: e.depth)
|
||||||
|
|
||||||
defer.returnValue((signed_pdus, signed_auth))
|
return (signed_pdus, signed_auth)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
|
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
|
||||||
@@ -429,7 +429,7 @@ class FederationClient(FederationBase):
|
|||||||
missing_events.discard(k)
|
missing_events.discard(k)
|
||||||
|
|
||||||
if not missing_events:
|
if not missing_events:
|
||||||
defer.returnValue((signed_events, failed_to_fetch))
|
return (signed_events, failed_to_fetch)
|
||||||
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"Fetching unknown state/auth events %s for room %s",
|
"Fetching unknown state/auth events %s for room %s",
|
||||||
@@ -465,7 +465,7 @@ class FederationClient(FederationBase):
|
|||||||
# We removed all events we successfully fetched from `batch`
|
# We removed all events we successfully fetched from `batch`
|
||||||
failed_to_fetch.update(batch)
|
failed_to_fetch.update(batch)
|
||||||
|
|
||||||
defer.returnValue((signed_events, failed_to_fetch))
|
return (signed_events, failed_to_fetch)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -485,7 +485,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
signed_auth.sort(key=lambda e: e.depth)
|
signed_auth.sort(key=lambda e: e.depth)
|
||||||
|
|
||||||
defer.returnValue(signed_auth)
|
return signed_auth
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _try_destination_list(self, description, destinations, callback):
|
def _try_destination_list(self, description, destinations, callback):
|
||||||
@@ -511,9 +511,8 @@ class FederationClient(FederationBase):
|
|||||||
The [Deferred] result of callback, if it succeeds
|
The [Deferred] result of callback, if it succeeds
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
SynapseError if the chosen remote server returns a 300/400 code.
|
SynapseError if the chosen remote server returns a 300/400 code, or
|
||||||
|
no servers were reachable.
|
||||||
RuntimeError if no servers were reachable.
|
|
||||||
"""
|
"""
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
if destination == self.server_name:
|
if destination == self.server_name:
|
||||||
@@ -521,7 +520,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
res = yield callback(destination)
|
res = yield callback(destination)
|
||||||
defer.returnValue(res)
|
return res
|
||||||
except InvalidResponseError as e:
|
except InvalidResponseError as e:
|
||||||
logger.warn("Failed to %s via %s: %s", description, destination, e)
|
logger.warn("Failed to %s via %s: %s", description, destination, e)
|
||||||
except HttpResponseException as e:
|
except HttpResponseException as e:
|
||||||
@@ -538,7 +537,7 @@ class FederationClient(FederationBase):
|
|||||||
except Exception:
|
except Exception:
|
||||||
logger.warn("Failed to %s via %s", description, destination, exc_info=1)
|
logger.warn("Failed to %s via %s", description, destination, exc_info=1)
|
||||||
|
|
||||||
raise RuntimeError("Failed to %s via any server" % (description,))
|
raise SynapseError(502, "Failed to %s via any server" % (description,))
|
||||||
|
|
||||||
def make_membership_event(
|
def make_membership_event(
|
||||||
self, destinations, room_id, user_id, membership, content, params
|
self, destinations, room_id, user_id, membership, content, params
|
||||||
@@ -615,7 +614,7 @@ class FederationClient(FederationBase):
|
|||||||
event_dict=pdu_dict,
|
event_dict=pdu_dict,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue((destination, ev, event_format))
|
return (destination, ev, event_format)
|
||||||
|
|
||||||
return self._try_destination_list(
|
return self._try_destination_list(
|
||||||
"make_" + membership, destinations, send_request
|
"make_" + membership, destinations, send_request
|
||||||
@@ -728,13 +727,11 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
check_authchain_validity(signed_auth)
|
check_authchain_validity(signed_auth)
|
||||||
|
|
||||||
defer.returnValue(
|
return {
|
||||||
{
|
"state": signed_state,
|
||||||
"state": signed_state,
|
"auth_chain": signed_auth,
|
||||||
"auth_chain": signed_auth,
|
"origin": destination,
|
||||||
"origin": destination,
|
}
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
return self._try_destination_list("send_join", destinations, send_request)
|
return self._try_destination_list("send_join", destinations, send_request)
|
||||||
|
|
||||||
@@ -758,7 +755,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
# FIXME: We should handle signature failures more gracefully.
|
# FIXME: We should handle signature failures more gracefully.
|
||||||
|
|
||||||
defer.returnValue(pdu)
|
return pdu
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _do_send_invite(self, destination, pdu, room_version):
|
def _do_send_invite(self, destination, pdu, room_version):
|
||||||
@@ -786,7 +783,7 @@ class FederationClient(FederationBase):
|
|||||||
"invite_room_state": pdu.unsigned.get("invite_room_state", []),
|
"invite_room_state": pdu.unsigned.get("invite_room_state", []),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
return content
|
||||||
except HttpResponseException as e:
|
except HttpResponseException as e:
|
||||||
if e.code in [400, 404]:
|
if e.code in [400, 404]:
|
||||||
err = e.to_synapse_error()
|
err = e.to_synapse_error()
|
||||||
@@ -821,7 +818,7 @@ class FederationClient(FederationBase):
|
|||||||
event_id=pdu.event_id,
|
event_id=pdu.event_id,
|
||||||
content=pdu.get_pdu_json(time_now),
|
content=pdu.get_pdu_json(time_now),
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
def send_leave(self, destinations, pdu):
|
def send_leave(self, destinations, pdu):
|
||||||
"""Sends a leave event to one of a list of homeservers.
|
"""Sends a leave event to one of a list of homeservers.
|
||||||
@@ -856,7 +853,7 @@ class FederationClient(FederationBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
logger.debug("Got content: %s", content)
|
logger.debug("Got content: %s", content)
|
||||||
defer.returnValue(None)
|
return None
|
||||||
|
|
||||||
return self._try_destination_list("send_leave", destinations, send_request)
|
return self._try_destination_list("send_leave", destinations, send_request)
|
||||||
|
|
||||||
@@ -917,7 +914,7 @@ class FederationClient(FederationBase):
|
|||||||
"missing": content.get("missing", []),
|
"missing": content.get("missing", []),
|
||||||
}
|
}
|
||||||
|
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_missing_events(
|
def get_missing_events(
|
||||||
@@ -974,7 +971,7 @@ class FederationClient(FederationBase):
|
|||||||
# get_missing_events
|
# get_missing_events
|
||||||
signed_events = []
|
signed_events = []
|
||||||
|
|
||||||
defer.returnValue(signed_events)
|
return signed_events
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def forward_third_party_invite(self, destinations, room_id, event_dict):
|
def forward_third_party_invite(self, destinations, room_id, event_dict):
|
||||||
@@ -986,7 +983,7 @@ class FederationClient(FederationBase):
|
|||||||
yield self.transport_layer.exchange_third_party_invite(
|
yield self.transport_layer.exchange_third_party_invite(
|
||||||
destination=destination, room_id=room_id, event_dict=event_dict
|
destination=destination, room_id=room_id, event_dict=event_dict
|
||||||
)
|
)
|
||||||
defer.returnValue(None)
|
return None
|
||||||
except CodeMessageException:
|
except CodeMessageException:
|
||||||
raise
|
raise
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -995,3 +992,39 @@ class FederationClient(FederationBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
raise RuntimeError("Failed to send to any server.")
|
raise RuntimeError("Failed to send to any server.")
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def get_room_complexity(self, destination, room_id):
|
||||||
|
"""
|
||||||
|
Fetch the complexity of a remote room from another server.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destination (str): The remote server
|
||||||
|
room_id (str): The room ID to ask about.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred[dict] or Deferred[None]: Dict contains the complexity
|
||||||
|
metric versions, while None means we could not fetch the complexity.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
complexity = yield self.transport_layer.get_room_complexity(
|
||||||
|
destination=destination, room_id=room_id
|
||||||
|
)
|
||||||
|
defer.returnValue(complexity)
|
||||||
|
except CodeMessageException as e:
|
||||||
|
# We didn't manage to get it -- probably a 404. We are okay if other
|
||||||
|
# servers don't give it to us.
|
||||||
|
logger.debug(
|
||||||
|
"Failed to fetch room complexity via %s for %s, got a %d",
|
||||||
|
destination,
|
||||||
|
room_id,
|
||||||
|
e.code,
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
logger.exception(
|
||||||
|
"Failed to fetch room complexity via %s for %s", destination, room_id
|
||||||
|
)
|
||||||
|
|
||||||
|
# If we don't manage to find it, return None. It's not an error if a
|
||||||
|
# server doesn't give it to us.
|
||||||
|
defer.returnValue(None)
|
||||||
|
|||||||
@@ -42,6 +42,8 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js
|
|||||||
from synapse.federation.persistence import TransactionActions
|
from synapse.federation.persistence import TransactionActions
|
||||||
from synapse.federation.units import Edu, Transaction
|
from synapse.federation.units import Edu, Transaction
|
||||||
from synapse.http.endpoint import parse_server_name
|
from synapse.http.endpoint import parse_server_name
|
||||||
|
from synapse.logging.context import nested_logging_context
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
from synapse.replication.http.federation import (
|
from synapse.replication.http.federation import (
|
||||||
ReplicationFederationSendEduRestServlet,
|
ReplicationFederationSendEduRestServlet,
|
||||||
ReplicationGetQueryRestServlet,
|
ReplicationGetQueryRestServlet,
|
||||||
@@ -50,8 +52,6 @@ from synapse.types import get_domain_from_id
|
|||||||
from synapse.util import glob_to_regex
|
from synapse.util import glob_to_regex
|
||||||
from synapse.util.async_helpers import Linearizer, concurrently_execute
|
from synapse.util.async_helpers import Linearizer, concurrently_execute
|
||||||
from synapse.util.caches.response_cache import ResponseCache
|
from synapse.util.caches.response_cache import ResponseCache
|
||||||
from synapse.util.logcontext import nested_logging_context
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
|
|
||||||
# when processing incoming transactions, we try to handle multiple rooms in
|
# when processing incoming transactions, we try to handle multiple rooms in
|
||||||
# parallel, up to this limit.
|
# parallel, up to this limit.
|
||||||
@@ -99,7 +99,7 @@ class FederationServer(FederationBase):
|
|||||||
|
|
||||||
res = self._transaction_from_pdus(pdus).get_dict()
|
res = self._transaction_from_pdus(pdus).get_dict()
|
||||||
|
|
||||||
defer.returnValue((200, res))
|
return (200, res)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -126,7 +126,7 @@ class FederationServer(FederationBase):
|
|||||||
origin, transaction, request_time
|
origin, transaction, request_time
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(result)
|
return result
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _handle_incoming_transaction(self, origin, transaction, request_time):
|
def _handle_incoming_transaction(self, origin, transaction, request_time):
|
||||||
@@ -147,8 +147,7 @@ class FederationServer(FederationBase):
|
|||||||
"[%s] We've already responded to this request",
|
"[%s] We've already responded to this request",
|
||||||
transaction.transaction_id,
|
transaction.transaction_id,
|
||||||
)
|
)
|
||||||
defer.returnValue(response)
|
return response
|
||||||
return
|
|
||||||
|
|
||||||
logger.debug("[%s] Transaction is new", transaction.transaction_id)
|
logger.debug("[%s] Transaction is new", transaction.transaction_id)
|
||||||
|
|
||||||
@@ -163,7 +162,7 @@ class FederationServer(FederationBase):
|
|||||||
yield self.transaction_actions.set_response(
|
yield self.transaction_actions.set_response(
|
||||||
origin, transaction, 400, response
|
origin, transaction, 400, response
|
||||||
)
|
)
|
||||||
defer.returnValue((400, response))
|
return (400, response)
|
||||||
|
|
||||||
received_pdus_counter.inc(len(transaction.pdus))
|
received_pdus_counter.inc(len(transaction.pdus))
|
||||||
|
|
||||||
@@ -265,7 +264,7 @@ class FederationServer(FederationBase):
|
|||||||
logger.debug("Returning: %s", str(response))
|
logger.debug("Returning: %s", str(response))
|
||||||
|
|
||||||
yield self.transaction_actions.set_response(origin, transaction, 200, response)
|
yield self.transaction_actions.set_response(origin, transaction, 200, response)
|
||||||
defer.returnValue((200, response))
|
return (200, response)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def received_edu(self, origin, edu_type, content):
|
def received_edu(self, origin, edu_type, content):
|
||||||
@@ -298,7 +297,7 @@ class FederationServer(FederationBase):
|
|||||||
event_id,
|
event_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue((200, resp))
|
return (200, resp)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_state_ids_request(self, origin, room_id, event_id):
|
def on_state_ids_request(self, origin, room_id, event_id):
|
||||||
@@ -315,9 +314,7 @@ class FederationServer(FederationBase):
|
|||||||
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
|
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
|
||||||
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
|
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
|
||||||
|
|
||||||
defer.returnValue(
|
return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
|
||||||
(200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _on_context_state_request_compute(self, room_id, event_id):
|
def _on_context_state_request_compute(self, room_id, event_id):
|
||||||
@@ -336,12 +333,10 @@ class FederationServer(FederationBase):
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(
|
return {
|
||||||
{
|
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
||||||
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
||||||
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
}
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -349,15 +344,15 @@ class FederationServer(FederationBase):
|
|||||||
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
|
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
|
||||||
|
|
||||||
if pdu:
|
if pdu:
|
||||||
defer.returnValue((200, self._transaction_from_pdus([pdu]).get_dict()))
|
return (200, self._transaction_from_pdus([pdu]).get_dict())
|
||||||
else:
|
else:
|
||||||
defer.returnValue((404, ""))
|
return (404, "")
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_query_request(self, query_type, args):
|
def on_query_request(self, query_type, args):
|
||||||
received_queries_counter.labels(query_type).inc()
|
received_queries_counter.labels(query_type).inc()
|
||||||
resp = yield self.registry.on_query(query_type, args)
|
resp = yield self.registry.on_query(query_type, args)
|
||||||
defer.returnValue((200, resp))
|
return (200, resp)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
|
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
|
||||||
@@ -369,11 +364,9 @@ class FederationServer(FederationBase):
|
|||||||
logger.warn("Room version %s not in %s", room_version, supported_versions)
|
logger.warn("Room version %s not in %s", room_version, supported_versions)
|
||||||
raise IncompatibleRoomVersionError(room_version=room_version)
|
raise IncompatibleRoomVersionError(room_version=room_version)
|
||||||
|
|
||||||
pdu = yield self.handler.on_make_join_request(room_id, user_id)
|
pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
|
||||||
time_now = self._clock.time_msec()
|
time_now = self._clock.time_msec()
|
||||||
defer.returnValue(
|
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
|
||||||
{"event": pdu.get_pdu_json(time_now), "room_version": room_version}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_invite_request(self, origin, content, room_version):
|
def on_invite_request(self, origin, content, room_version):
|
||||||
@@ -391,7 +384,7 @@ class FederationServer(FederationBase):
|
|||||||
yield self.check_server_matches_acl(origin_host, pdu.room_id)
|
yield self.check_server_matches_acl(origin_host, pdu.room_id)
|
||||||
ret_pdu = yield self.handler.on_invite_request(origin, pdu)
|
ret_pdu = yield self.handler.on_invite_request(origin, pdu)
|
||||||
time_now = self._clock.time_msec()
|
time_now = self._clock.time_msec()
|
||||||
defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})
|
return {"event": ret_pdu.get_pdu_json(time_now)}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_send_join_request(self, origin, content, room_id):
|
def on_send_join_request(self, origin, content, room_id):
|
||||||
@@ -407,30 +400,26 @@ class FederationServer(FederationBase):
|
|||||||
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
|
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
|
||||||
res_pdus = yield self.handler.on_send_join_request(origin, pdu)
|
res_pdus = yield self.handler.on_send_join_request(origin, pdu)
|
||||||
time_now = self._clock.time_msec()
|
time_now = self._clock.time_msec()
|
||||||
defer.returnValue(
|
return (
|
||||||
(
|
200,
|
||||||
200,
|
{
|
||||||
{
|
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
|
||||||
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
|
"auth_chain": [
|
||||||
"auth_chain": [
|
p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
|
||||||
p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
|
],
|
||||||
],
|
},
|
||||||
},
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_make_leave_request(self, origin, room_id, user_id):
|
def on_make_leave_request(self, origin, room_id, user_id):
|
||||||
origin_host, _ = parse_server_name(origin)
|
origin_host, _ = parse_server_name(origin)
|
||||||
yield self.check_server_matches_acl(origin_host, room_id)
|
yield self.check_server_matches_acl(origin_host, room_id)
|
||||||
pdu = yield self.handler.on_make_leave_request(room_id, user_id)
|
pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
|
||||||
|
|
||||||
room_version = yield self.store.get_room_version(room_id)
|
room_version = yield self.store.get_room_version(room_id)
|
||||||
|
|
||||||
time_now = self._clock.time_msec()
|
time_now = self._clock.time_msec()
|
||||||
defer.returnValue(
|
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
|
||||||
{"event": pdu.get_pdu_json(time_now), "room_version": room_version}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_send_leave_request(self, origin, content, room_id):
|
def on_send_leave_request(self, origin, content, room_id):
|
||||||
@@ -445,7 +434,7 @@ class FederationServer(FederationBase):
|
|||||||
|
|
||||||
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
|
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
|
||||||
yield self.handler.on_send_leave_request(origin, pdu)
|
yield self.handler.on_send_leave_request(origin, pdu)
|
||||||
defer.returnValue((200, {}))
|
return (200, {})
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_event_auth(self, origin, room_id, event_id):
|
def on_event_auth(self, origin, room_id, event_id):
|
||||||
@@ -456,7 +445,7 @@ class FederationServer(FederationBase):
|
|||||||
time_now = self._clock.time_msec()
|
time_now = self._clock.time_msec()
|
||||||
auth_pdus = yield self.handler.on_event_auth(event_id)
|
auth_pdus = yield self.handler.on_event_auth(event_id)
|
||||||
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
|
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
|
||||||
defer.returnValue((200, res))
|
return (200, res)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_query_auth_request(self, origin, content, room_id, event_id):
|
def on_query_auth_request(self, origin, content, room_id, event_id):
|
||||||
@@ -509,7 +498,7 @@ class FederationServer(FederationBase):
|
|||||||
"missing": ret.get("missing", []),
|
"missing": ret.get("missing", []),
|
||||||
}
|
}
|
||||||
|
|
||||||
defer.returnValue((200, send_content))
|
return (200, send_content)
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def on_query_client_keys(self, origin, content):
|
def on_query_client_keys(self, origin, content):
|
||||||
@@ -548,7 +537,7 @@ class FederationServer(FederationBase):
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({"one_time_keys": json_result})
|
return {"one_time_keys": json_result}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -580,9 +569,7 @@ class FederationServer(FederationBase):
|
|||||||
|
|
||||||
time_now = self._clock.time_msec()
|
time_now = self._clock.time_msec()
|
||||||
|
|
||||||
defer.returnValue(
|
return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
|
||||||
{"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
|
|
||||||
)
|
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def on_openid_userinfo(self, token):
|
def on_openid_userinfo(self, token):
|
||||||
@@ -676,14 +663,14 @@ class FederationServer(FederationBase):
|
|||||||
ret = yield self.handler.exchange_third_party_invite(
|
ret = yield self.handler.exchange_third_party_invite(
|
||||||
sender_user_id, target_user_id, room_id, signed
|
sender_user_id, target_user_id, room_id, signed
|
||||||
)
|
)
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
|
def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
|
||||||
ret = yield self.handler.on_exchange_third_party_invite_request(
|
ret = yield self.handler.on_exchange_third_party_invite_request(
|
||||||
origin, room_id, event_dict
|
origin, room_id, event_dict
|
||||||
)
|
)
|
||||||
defer.returnValue(ret)
|
return ret
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def check_server_matches_acl(self, server_name, room_id):
|
def check_server_matches_acl(self, server_name, room_id):
|
||||||
|
|||||||
@@ -21,9 +21,7 @@ These actions are mostly only used by the :py:mod:`.replication` module.
|
|||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
from twisted.internet import defer
|
from synapse.logging.utils import log_function
|
||||||
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -63,33 +61,3 @@ class TransactionActions(object):
|
|||||||
return self.store.set_received_txn_response(
|
return self.store.set_received_txn_response(
|
||||||
transaction.transaction_id, origin, code, response
|
transaction.transaction_id, origin, code, response
|
||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
@log_function
|
|
||||||
def prepare_to_send(self, transaction):
|
|
||||||
""" Persists the `Transaction` we are about to send and works out the
|
|
||||||
correct value for the `prev_ids` key.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred
|
|
||||||
"""
|
|
||||||
transaction.prev_ids = yield self.store.prep_send_transaction(
|
|
||||||
transaction.transaction_id,
|
|
||||||
transaction.destination,
|
|
||||||
transaction.origin_server_ts,
|
|
||||||
)
|
|
||||||
|
|
||||||
@log_function
|
|
||||||
def delivered(self, transaction, response_code, response_dict):
|
|
||||||
""" Marks the given `Transaction` as having been successfully
|
|
||||||
delivered to the remote homeserver, and what the response was.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred
|
|
||||||
"""
|
|
||||||
return self.store.delivered_txn(
|
|
||||||
transaction.transaction_id,
|
|
||||||
transaction.destination,
|
|
||||||
response_code,
|
|
||||||
response_dict,
|
|
||||||
)
|
|
||||||
|
|||||||
@@ -26,6 +26,11 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue
|
|||||||
from synapse.federation.sender.transaction_manager import TransactionManager
|
from synapse.federation.sender.transaction_manager import TransactionManager
|
||||||
from synapse.federation.units import Edu
|
from synapse.federation.units import Edu
|
||||||
from synapse.handlers.presence import get_interested_remotes
|
from synapse.handlers.presence import get_interested_remotes
|
||||||
|
from synapse.logging.context import (
|
||||||
|
make_deferred_yieldable,
|
||||||
|
preserve_fn,
|
||||||
|
run_in_background,
|
||||||
|
)
|
||||||
from synapse.metrics import (
|
from synapse.metrics import (
|
||||||
LaterGauge,
|
LaterGauge,
|
||||||
event_processing_loop_counter,
|
event_processing_loop_counter,
|
||||||
@@ -33,7 +38,6 @@ from synapse.metrics import (
|
|||||||
events_processed_counter,
|
events_processed_counter,
|
||||||
)
|
)
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.util import logcontext
|
|
||||||
from synapse.util.metrics import measure_func
|
from synapse.util.metrics import measure_func
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -210,10 +214,10 @@ class FederationSender(object):
|
|||||||
for event in events:
|
for event in events:
|
||||||
events_by_room.setdefault(event.room_id, []).append(event)
|
events_by_room.setdefault(event.room_id, []).append(event)
|
||||||
|
|
||||||
yield logcontext.make_deferred_yieldable(
|
yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[
|
||||||
logcontext.run_in_background(handle_room_events, evs)
|
run_in_background(handle_room_events, evs)
|
||||||
for evs in itervalues(events_by_room)
|
for evs in itervalues(events_by_room)
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
@@ -360,7 +364,7 @@ class FederationSender(object):
|
|||||||
for queue in queues:
|
for queue in queues:
|
||||||
queue.flush_read_receipts_for_room(room_id)
|
queue.flush_read_receipts_for_room(room_id)
|
||||||
|
|
||||||
@logcontext.preserve_fn # the caller should not yield on this
|
@preserve_fn # the caller should not yield on this
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_presence(self, states):
|
def send_presence(self, states):
|
||||||
"""Send the new presence states to the appropriate destinations.
|
"""Send the new presence states to the appropriate destinations.
|
||||||
|
|||||||
@@ -374,7 +374,7 @@ class PerDestinationQueue(object):
|
|||||||
|
|
||||||
assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
|
assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
|
||||||
|
|
||||||
defer.returnValue((edus, now_stream_id))
|
return (edus, now_stream_id)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_to_device_message_edus(self, limit):
|
def _get_to_device_message_edus(self, limit):
|
||||||
@@ -393,4 +393,4 @@ class PerDestinationQueue(object):
|
|||||||
for content in contents
|
for content in contents
|
||||||
]
|
]
|
||||||
|
|
||||||
defer.returnValue((edus, stream_id))
|
return (edus, stream_id)
|
||||||
|
|||||||
@@ -63,8 +63,6 @@ class TransactionManager(object):
|
|||||||
len(edus),
|
len(edus),
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.debug("TX [%s] Persisting transaction...", destination)
|
|
||||||
|
|
||||||
transaction = Transaction.create_new(
|
transaction = Transaction.create_new(
|
||||||
origin_server_ts=int(self.clock.time_msec()),
|
origin_server_ts=int(self.clock.time_msec()),
|
||||||
transaction_id=txn_id,
|
transaction_id=txn_id,
|
||||||
@@ -76,9 +74,6 @@ class TransactionManager(object):
|
|||||||
|
|
||||||
self._next_txn_id += 1
|
self._next_txn_id += 1
|
||||||
|
|
||||||
yield self._transaction_actions.prepare_to_send(transaction)
|
|
||||||
|
|
||||||
logger.debug("TX [%s] Persisted transaction", destination)
|
|
||||||
logger.info(
|
logger.info(
|
||||||
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
|
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
|
||||||
destination,
|
destination,
|
||||||
@@ -118,10 +113,6 @@ class TransactionManager(object):
|
|||||||
|
|
||||||
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
|
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
|
||||||
|
|
||||||
yield self._transaction_actions.delivered(transaction, code, response)
|
|
||||||
|
|
||||||
logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
|
|
||||||
|
|
||||||
if code == 200:
|
if code == 200:
|
||||||
for e_id, r in response.get("pdus", {}).items():
|
for e_id, r in response.get("pdus", {}).items():
|
||||||
if "error" in r:
|
if "error" in r:
|
||||||
@@ -142,4 +133,4 @@ class TransactionManager(object):
|
|||||||
)
|
)
|
||||||
success = False
|
success = False
|
||||||
|
|
||||||
defer.returnValue(success)
|
return success
|
||||||
|
|||||||
@@ -21,8 +21,12 @@ from six.moves import urllib
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.constants import Membership
|
from synapse.api.constants import Membership
|
||||||
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
|
from synapse.api.urls import (
|
||||||
from synapse.util.logutils import log_function
|
FEDERATION_UNSTABLE_PREFIX,
|
||||||
|
FEDERATION_V1_PREFIX,
|
||||||
|
FEDERATION_V2_PREFIX,
|
||||||
|
)
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -183,7 +187,7 @@ class TransportLayerClient(object):
|
|||||||
try_trailing_slash_on_400=True,
|
try_trailing_slash_on_400=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -201,7 +205,7 @@ class TransportLayerClient(object):
|
|||||||
ignore_backoff=ignore_backoff,
|
ignore_backoff=ignore_backoff,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -259,7 +263,7 @@ class TransportLayerClient(object):
|
|||||||
ignore_backoff=ignore_backoff,
|
ignore_backoff=ignore_backoff,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -270,7 +274,7 @@ class TransportLayerClient(object):
|
|||||||
destination=destination, path=path, data=content
|
destination=destination, path=path, data=content
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -288,7 +292,7 @@ class TransportLayerClient(object):
|
|||||||
ignore_backoff=True,
|
ignore_backoff=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -299,7 +303,7 @@ class TransportLayerClient(object):
|
|||||||
destination=destination, path=path, data=content, ignore_backoff=True
|
destination=destination, path=path, data=content, ignore_backoff=True
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -310,7 +314,7 @@ class TransportLayerClient(object):
|
|||||||
destination=destination, path=path, data=content, ignore_backoff=True
|
destination=destination, path=path, data=content, ignore_backoff=True
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -339,7 +343,7 @@ class TransportLayerClient(object):
|
|||||||
destination=remote_server, path=path, args=args, ignore_backoff=True
|
destination=remote_server, path=path, args=args, ignore_backoff=True
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -350,7 +354,7 @@ class TransportLayerClient(object):
|
|||||||
destination=destination, path=path, data=event_dict
|
destination=destination, path=path, data=event_dict
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -359,7 +363,7 @@ class TransportLayerClient(object):
|
|||||||
|
|
||||||
content = yield self.client.get_json(destination=destination, path=path)
|
content = yield self.client.get_json(destination=destination, path=path)
|
||||||
|
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -370,7 +374,7 @@ class TransportLayerClient(object):
|
|||||||
destination=destination, path=path, data=content
|
destination=destination, path=path, data=content
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -402,7 +406,7 @@ class TransportLayerClient(object):
|
|||||||
content = yield self.client.post_json(
|
content = yield self.client.post_json(
|
||||||
destination=destination, path=path, data=query_content, timeout=timeout
|
destination=destination, path=path, data=query_content, timeout=timeout
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -426,7 +430,7 @@ class TransportLayerClient(object):
|
|||||||
content = yield self.client.get_json(
|
content = yield self.client.get_json(
|
||||||
destination=destination, path=path, timeout=timeout
|
destination=destination, path=path, timeout=timeout
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -460,7 +464,7 @@ class TransportLayerClient(object):
|
|||||||
content = yield self.client.post_json(
|
content = yield self.client.post_json(
|
||||||
destination=destination, path=path, data=query_content, timeout=timeout
|
destination=destination, path=path, data=query_content, timeout=timeout
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -488,7 +492,7 @@ class TransportLayerClient(object):
|
|||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(content)
|
return content
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def get_group_profile(self, destination, group_id, requester_user_id):
|
def get_group_profile(self, destination, group_id, requester_user_id):
|
||||||
@@ -935,6 +939,23 @@ class TransportLayerClient(object):
|
|||||||
destination=destination, path=path, data=content, ignore_backoff=True
|
destination=destination, path=path, data=content, ignore_backoff=True
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def get_room_complexity(self, destination, room_id):
|
||||||
|
"""
|
||||||
|
Args:
|
||||||
|
destination (str): The remote server
|
||||||
|
room_id (str): The room ID to ask about.
|
||||||
|
"""
|
||||||
|
path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id)
|
||||||
|
|
||||||
|
return self.client.get_json(destination=destination, path=path)
|
||||||
|
|
||||||
|
|
||||||
|
def _create_path(federation_prefix, path, *args):
|
||||||
|
"""
|
||||||
|
Ensures that all args are url encoded.
|
||||||
|
"""
|
||||||
|
return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
|
||||||
|
|
||||||
|
|
||||||
def _create_v1_path(path, *args):
|
def _create_v1_path(path, *args):
|
||||||
"""Creates a path against V1 federation API from the path template and
|
"""Creates a path against V1 federation API from the path template and
|
||||||
@@ -951,9 +972,7 @@ def _create_v1_path(path, *args):
|
|||||||
Returns:
|
Returns:
|
||||||
str
|
str
|
||||||
"""
|
"""
|
||||||
return FEDERATION_V1_PREFIX + path % tuple(
|
return _create_path(FEDERATION_V1_PREFIX, path, *args)
|
||||||
urllib.parse.quote(arg, "") for arg in args
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _create_v2_path(path, *args):
|
def _create_v2_path(path, *args):
|
||||||
@@ -971,6 +990,4 @@ def _create_v2_path(path, *args):
|
|||||||
Returns:
|
Returns:
|
||||||
str
|
str
|
||||||
"""
|
"""
|
||||||
return FEDERATION_V2_PREFIX + path % tuple(
|
return _create_path(FEDERATION_V2_PREFIX, path, *args)
|
||||||
urllib.parse.quote(arg, "") for arg in args
|
|
||||||
)
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -43,9 +43,9 @@ from signedjson.sign import sign_json
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
|
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.types import get_domain_from_id
|
from synapse.types import get_domain_from_id
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -157,7 +157,7 @@ class GroupAttestionRenewer(object):
|
|||||||
|
|
||||||
yield self.store.update_remote_attestion(group_id, user_id, attestation)
|
yield self.store.update_remote_attestion(group_id, user_id, attestation)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
def _start_renew_attestations(self):
|
def _start_renew_attestations(self):
|
||||||
return run_as_background_process("renew_attestations", self._renew_attestations)
|
return run_as_background_process("renew_attestations", self._renew_attestations)
|
||||||
|
|||||||
@@ -85,7 +85,7 @@ class GroupsServerHandler(object):
|
|||||||
if not is_admin:
|
if not is_admin:
|
||||||
raise SynapseError(403, "User is not admin in group")
|
raise SynapseError(403, "User is not admin in group")
|
||||||
|
|
||||||
defer.returnValue(group)
|
return group
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_group_summary(self, group_id, requester_user_id):
|
def get_group_summary(self, group_id, requester_user_id):
|
||||||
@@ -151,22 +151,20 @@ class GroupsServerHandler(object):
|
|||||||
group_id, requester_user_id
|
group_id, requester_user_id
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(
|
return {
|
||||||
{
|
"profile": profile,
|
||||||
"profile": profile,
|
"users_section": {
|
||||||
"users_section": {
|
"users": users,
|
||||||
"users": users,
|
"roles": roles,
|
||||||
"roles": roles,
|
"total_user_count_estimate": 0, # TODO
|
||||||
"total_user_count_estimate": 0, # TODO
|
},
|
||||||
},
|
"rooms_section": {
|
||||||
"rooms_section": {
|
"rooms": rooms,
|
||||||
"rooms": rooms,
|
"categories": categories,
|
||||||
"categories": categories,
|
"total_room_count_estimate": 0, # TODO
|
||||||
"total_room_count_estimate": 0, # TODO
|
},
|
||||||
},
|
"user": membership_info,
|
||||||
"user": membership_info,
|
}
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def update_group_summary_room(
|
def update_group_summary_room(
|
||||||
@@ -192,7 +190,7 @@ class GroupsServerHandler(object):
|
|||||||
is_public=is_public,
|
is_public=is_public,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def delete_group_summary_room(
|
def delete_group_summary_room(
|
||||||
@@ -208,7 +206,7 @@ class GroupsServerHandler(object):
|
|||||||
group_id=group_id, room_id=room_id, category_id=category_id
|
group_id=group_id, room_id=room_id, category_id=category_id
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def set_group_join_policy(self, group_id, requester_user_id, content):
|
def set_group_join_policy(self, group_id, requester_user_id, content):
|
||||||
@@ -228,7 +226,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
yield self.store.set_group_join_policy(group_id, join_policy=join_policy)
|
yield self.store.set_group_join_policy(group_id, join_policy=join_policy)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_group_categories(self, group_id, requester_user_id):
|
def get_group_categories(self, group_id, requester_user_id):
|
||||||
@@ -237,7 +235,7 @@ class GroupsServerHandler(object):
|
|||||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||||
|
|
||||||
categories = yield self.store.get_group_categories(group_id=group_id)
|
categories = yield self.store.get_group_categories(group_id=group_id)
|
||||||
defer.returnValue({"categories": categories})
|
return {"categories": categories}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_group_category(self, group_id, requester_user_id, category_id):
|
def get_group_category(self, group_id, requester_user_id, category_id):
|
||||||
@@ -249,7 +247,7 @@ class GroupsServerHandler(object):
|
|||||||
group_id=group_id, category_id=category_id
|
group_id=group_id, category_id=category_id
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(res)
|
return res
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def update_group_category(self, group_id, requester_user_id, category_id, content):
|
def update_group_category(self, group_id, requester_user_id, category_id, content):
|
||||||
@@ -269,7 +267,7 @@ class GroupsServerHandler(object):
|
|||||||
profile=profile,
|
profile=profile,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def delete_group_category(self, group_id, requester_user_id, category_id):
|
def delete_group_category(self, group_id, requester_user_id, category_id):
|
||||||
@@ -283,7 +281,7 @@ class GroupsServerHandler(object):
|
|||||||
group_id=group_id, category_id=category_id
|
group_id=group_id, category_id=category_id
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_group_roles(self, group_id, requester_user_id):
|
def get_group_roles(self, group_id, requester_user_id):
|
||||||
@@ -292,7 +290,7 @@ class GroupsServerHandler(object):
|
|||||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||||
|
|
||||||
roles = yield self.store.get_group_roles(group_id=group_id)
|
roles = yield self.store.get_group_roles(group_id=group_id)
|
||||||
defer.returnValue({"roles": roles})
|
return {"roles": roles}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_group_role(self, group_id, requester_user_id, role_id):
|
def get_group_role(self, group_id, requester_user_id, role_id):
|
||||||
@@ -301,7 +299,7 @@ class GroupsServerHandler(object):
|
|||||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||||
|
|
||||||
res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
|
res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
|
||||||
defer.returnValue(res)
|
return res
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def update_group_role(self, group_id, requester_user_id, role_id, content):
|
def update_group_role(self, group_id, requester_user_id, role_id, content):
|
||||||
@@ -319,7 +317,7 @@ class GroupsServerHandler(object):
|
|||||||
group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
|
group_id=group_id, role_id=role_id, is_public=is_public, profile=profile
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def delete_group_role(self, group_id, requester_user_id, role_id):
|
def delete_group_role(self, group_id, requester_user_id, role_id):
|
||||||
@@ -331,7 +329,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
yield self.store.remove_group_role(group_id=group_id, role_id=role_id)
|
yield self.store.remove_group_role(group_id=group_id, role_id=role_id)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def update_group_summary_user(
|
def update_group_summary_user(
|
||||||
@@ -355,7 +353,7 @@ class GroupsServerHandler(object):
|
|||||||
is_public=is_public,
|
is_public=is_public,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
|
def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
|
||||||
@@ -369,7 +367,7 @@ class GroupsServerHandler(object):
|
|||||||
group_id=group_id, user_id=user_id, role_id=role_id
|
group_id=group_id, user_id=user_id, role_id=role_id
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_group_profile(self, group_id, requester_user_id):
|
def get_group_profile(self, group_id, requester_user_id):
|
||||||
@@ -391,7 +389,7 @@ class GroupsServerHandler(object):
|
|||||||
group_description = {key: group[key] for key in cols}
|
group_description = {key: group[key] for key in cols}
|
||||||
group_description["is_openly_joinable"] = group["join_policy"] == "open"
|
group_description["is_openly_joinable"] = group["join_policy"] == "open"
|
||||||
|
|
||||||
defer.returnValue(group_description)
|
return group_description
|
||||||
else:
|
else:
|
||||||
raise SynapseError(404, "Unknown group")
|
raise SynapseError(404, "Unknown group")
|
||||||
|
|
||||||
@@ -461,9 +459,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
# TODO: If admin add lists of users whose attestations have timed out
|
# TODO: If admin add lists of users whose attestations have timed out
|
||||||
|
|
||||||
defer.returnValue(
|
return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
|
||||||
{"chunk": chunk, "total_user_count_estimate": len(user_results)}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_invited_users_in_group(self, group_id, requester_user_id):
|
def get_invited_users_in_group(self, group_id, requester_user_id):
|
||||||
@@ -494,9 +490,7 @@ class GroupsServerHandler(object):
|
|||||||
logger.warn("Error getting profile for %s: %s", user_id, e)
|
logger.warn("Error getting profile for %s: %s", user_id, e)
|
||||||
user_profiles.append(user_profile)
|
user_profiles.append(user_profile)
|
||||||
|
|
||||||
defer.returnValue(
|
return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
|
||||||
{"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_rooms_in_group(self, group_id, requester_user_id):
|
def get_rooms_in_group(self, group_id, requester_user_id):
|
||||||
@@ -533,9 +527,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
chunk.sort(key=lambda e: -e["num_joined_members"])
|
chunk.sort(key=lambda e: -e["num_joined_members"])
|
||||||
|
|
||||||
defer.returnValue(
|
return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
|
||||||
{"chunk": chunk, "total_room_count_estimate": len(room_results)}
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def add_room_to_group(self, group_id, requester_user_id, room_id, content):
|
def add_room_to_group(self, group_id, requester_user_id, room_id, content):
|
||||||
@@ -551,7 +543,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
yield self.store.add_room_to_group(group_id, room_id, is_public=is_public)
|
yield self.store.add_room_to_group(group_id, room_id, is_public=is_public)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def update_room_in_group(
|
def update_room_in_group(
|
||||||
@@ -574,7 +566,7 @@ class GroupsServerHandler(object):
|
|||||||
else:
|
else:
|
||||||
raise SynapseError(400, "Uknown config option")
|
raise SynapseError(400, "Uknown config option")
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def remove_room_from_group(self, group_id, requester_user_id, room_id):
|
def remove_room_from_group(self, group_id, requester_user_id, room_id):
|
||||||
@@ -586,7 +578,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
yield self.store.remove_room_from_group(group_id, room_id)
|
yield self.store.remove_room_from_group(group_id, room_id)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def invite_to_group(self, group_id, user_id, requester_user_id, content):
|
def invite_to_group(self, group_id, user_id, requester_user_id, content):
|
||||||
@@ -644,9 +636,9 @@ class GroupsServerHandler(object):
|
|||||||
)
|
)
|
||||||
elif res["state"] == "invite":
|
elif res["state"] == "invite":
|
||||||
yield self.store.add_group_invite(group_id, user_id)
|
yield self.store.add_group_invite(group_id, user_id)
|
||||||
defer.returnValue({"state": "invite"})
|
return {"state": "invite"}
|
||||||
elif res["state"] == "reject":
|
elif res["state"] == "reject":
|
||||||
defer.returnValue({"state": "reject"})
|
return {"state": "reject"}
|
||||||
else:
|
else:
|
||||||
raise SynapseError(502, "Unknown state returned by HS")
|
raise SynapseError(502, "Unknown state returned by HS")
|
||||||
|
|
||||||
@@ -679,7 +671,7 @@ class GroupsServerHandler(object):
|
|||||||
remote_attestation=remote_attestation,
|
remote_attestation=remote_attestation,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(local_attestation)
|
return local_attestation
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def accept_invite(self, group_id, requester_user_id, content):
|
def accept_invite(self, group_id, requester_user_id, content):
|
||||||
@@ -699,7 +691,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
local_attestation = yield self._add_user(group_id, requester_user_id, content)
|
local_attestation = yield self._add_user(group_id, requester_user_id, content)
|
||||||
|
|
||||||
defer.returnValue({"state": "join", "attestation": local_attestation})
|
return {"state": "join", "attestation": local_attestation}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def join_group(self, group_id, requester_user_id, content):
|
def join_group(self, group_id, requester_user_id, content):
|
||||||
@@ -716,7 +708,7 @@ class GroupsServerHandler(object):
|
|||||||
|
|
||||||
local_attestation = yield self._add_user(group_id, requester_user_id, content)
|
local_attestation = yield self._add_user(group_id, requester_user_id, content)
|
||||||
|
|
||||||
defer.returnValue({"state": "join", "attestation": local_attestation})
|
return {"state": "join", "attestation": local_attestation}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def knock(self, group_id, requester_user_id, content):
|
def knock(self, group_id, requester_user_id, content):
|
||||||
@@ -769,7 +761,7 @@ class GroupsServerHandler(object):
|
|||||||
if not self.hs.is_mine_id(user_id):
|
if not self.hs.is_mine_id(user_id):
|
||||||
yield self.store.maybe_delete_remote_profile_cache(user_id)
|
yield self.store.maybe_delete_remote_profile_cache(user_id)
|
||||||
|
|
||||||
defer.returnValue({})
|
return {}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def create_group(self, group_id, requester_user_id, content):
|
def create_group(self, group_id, requester_user_id, content):
|
||||||
@@ -845,7 +837,7 @@ class GroupsServerHandler(object):
|
|||||||
avatar_url=user_profile.get("avatar_url"),
|
avatar_url=user_profile.get("avatar_url"),
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue({"group_id": group_id})
|
return {"group_id": group_id}
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def delete_group(self, group_id, requester_user_id):
|
def delete_group(self, group_id, requester_user_id):
|
||||||
|
|||||||
+31
-10
@@ -45,6 +45,7 @@ class BaseHandler(object):
|
|||||||
self.state_handler = hs.get_state_handler()
|
self.state_handler = hs.get_state_handler()
|
||||||
self.distributor = hs.get_distributor()
|
self.distributor = hs.get_distributor()
|
||||||
self.ratelimiter = hs.get_ratelimiter()
|
self.ratelimiter = hs.get_ratelimiter()
|
||||||
|
self.admin_redaction_ratelimiter = hs.get_admin_redaction_ratelimiter()
|
||||||
self.clock = hs.get_clock()
|
self.clock = hs.get_clock()
|
||||||
self.hs = hs
|
self.hs = hs
|
||||||
|
|
||||||
@@ -53,7 +54,7 @@ class BaseHandler(object):
|
|||||||
self.event_builder_factory = hs.get_event_builder_factory()
|
self.event_builder_factory = hs.get_event_builder_factory()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def ratelimit(self, requester, update=True):
|
def ratelimit(self, requester, update=True, is_admin_redaction=False):
|
||||||
"""Ratelimits requests.
|
"""Ratelimits requests.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -62,6 +63,9 @@ class BaseHandler(object):
|
|||||||
Set to False when doing multiple checks for one request (e.g.
|
Set to False when doing multiple checks for one request (e.g.
|
||||||
to check up front if we would reject the request), and set to
|
to check up front if we would reject the request), and set to
|
||||||
True for the last call for a given request.
|
True for the last call for a given request.
|
||||||
|
is_admin_redaction (bool): Whether this is a room admin/moderator
|
||||||
|
redacting an event. If so then we may apply different
|
||||||
|
ratelimits depending on config.
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
LimitExceededError if the request should be ratelimited
|
LimitExceededError if the request should be ratelimited
|
||||||
@@ -90,16 +94,33 @@ class BaseHandler(object):
|
|||||||
messages_per_second = override.messages_per_second
|
messages_per_second = override.messages_per_second
|
||||||
burst_count = override.burst_count
|
burst_count = override.burst_count
|
||||||
else:
|
else:
|
||||||
messages_per_second = self.hs.config.rc_message.per_second
|
# We default to different values if this is an admin redaction and
|
||||||
burst_count = self.hs.config.rc_message.burst_count
|
# the config is set
|
||||||
|
if is_admin_redaction and self.hs.config.rc_admin_redaction:
|
||||||
|
messages_per_second = self.hs.config.rc_admin_redaction.per_second
|
||||||
|
burst_count = self.hs.config.rc_admin_redaction.burst_count
|
||||||
|
else:
|
||||||
|
messages_per_second = self.hs.config.rc_message.per_second
|
||||||
|
burst_count = self.hs.config.rc_message.burst_count
|
||||||
|
|
||||||
allowed, time_allowed = self.ratelimiter.can_do_action(
|
if is_admin_redaction and self.hs.config.rc_admin_redaction:
|
||||||
user_id,
|
# If we have separate config for admin redactions we use a separate
|
||||||
time_now,
|
# ratelimiter
|
||||||
rate_hz=messages_per_second,
|
allowed, time_allowed = self.admin_redaction_ratelimiter.can_do_action(
|
||||||
burst_count=burst_count,
|
user_id,
|
||||||
update=update,
|
time_now,
|
||||||
)
|
rate_hz=messages_per_second,
|
||||||
|
burst_count=burst_count,
|
||||||
|
update=update,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
allowed, time_allowed = self.ratelimiter.can_do_action(
|
||||||
|
user_id,
|
||||||
|
time_now,
|
||||||
|
rate_hz=messages_per_second,
|
||||||
|
burst_count=burst_count,
|
||||||
|
update=update,
|
||||||
|
)
|
||||||
if not allowed:
|
if not allowed:
|
||||||
raise LimitExceededError(
|
raise LimitExceededError(
|
||||||
retry_after_ms=int(1000 * (time_allowed - time_now))
|
retry_after_ms=int(1000 * (time_allowed - time_now))
|
||||||
|
|||||||
@@ -51,8 +51,8 @@ class AccountDataEventSource(object):
|
|||||||
{"type": account_data_type, "content": content, "room_id": room_id}
|
{"type": account_data_type, "content": content, "room_id": room_id}
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue((results, current_stream_id))
|
return (results, current_stream_id)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_pagination_rows(self, user, config, key):
|
def get_pagination_rows(self, user, config, key):
|
||||||
defer.returnValue(([], config.to_id))
|
return ([], config.to_id)
|
||||||
|
|||||||
@@ -22,9 +22,10 @@ from email.mime.text import MIMEText
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import StoreError
|
from synapse.api.errors import StoreError
|
||||||
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util import stringutils
|
from synapse.util import stringutils
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from synapse.push.mailer import load_jinja2_templates
|
from synapse.push.mailer import load_jinja2_templates
|
||||||
@@ -67,7 +68,14 @@ class AccountValidityHandler(object):
|
|||||||
)
|
)
|
||||||
|
|
||||||
# Check the renewal emails to send and send them every 30min.
|
# Check the renewal emails to send and send them every 30min.
|
||||||
self.clock.looping_call(self.send_renewal_emails, 30 * 60 * 1000)
|
def send_emails():
|
||||||
|
# run as a background process to make sure that the database transactions
|
||||||
|
# have a logcontext to report to
|
||||||
|
return run_as_background_process(
|
||||||
|
"send_renewals", self.send_renewal_emails
|
||||||
|
)
|
||||||
|
|
||||||
|
self.clock.looping_call(send_emails, 30 * 60 * 1000)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_renewal_emails(self):
|
def send_renewal_emails(self):
|
||||||
@@ -185,7 +193,7 @@ class AccountValidityHandler(object):
|
|||||||
if threepid["medium"] == "email":
|
if threepid["medium"] == "email":
|
||||||
addresses.append(threepid["address"])
|
addresses.append(threepid["address"])
|
||||||
|
|
||||||
defer.returnValue(addresses)
|
return addresses
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_renewal_token(self, user_id):
|
def _get_renewal_token(self, user_id):
|
||||||
@@ -206,7 +214,7 @@ class AccountValidityHandler(object):
|
|||||||
try:
|
try:
|
||||||
renewal_token = stringutils.random_string(32)
|
renewal_token = stringutils.random_string(32)
|
||||||
yield self.store.set_renewal_token_for_user(user_id, renewal_token)
|
yield self.store.set_renewal_token_for_user(user_id, renewal_token)
|
||||||
defer.returnValue(renewal_token)
|
return renewal_token
|
||||||
except StoreError:
|
except StoreError:
|
||||||
attempts += 1
|
attempts += 1
|
||||||
raise StoreError(500, "Couldn't generate a unique string as refresh string.")
|
raise StoreError(500, "Couldn't generate a unique string as refresh string.")
|
||||||
@@ -218,11 +226,19 @@ class AccountValidityHandler(object):
|
|||||||
|
|
||||||
Args:
|
Args:
|
||||||
renewal_token (str): Token sent with the renewal request.
|
renewal_token (str): Token sent with the renewal request.
|
||||||
|
Returns:
|
||||||
|
bool: Whether the provided token is valid.
|
||||||
"""
|
"""
|
||||||
user_id = yield self.store.get_user_from_renewal_token(renewal_token)
|
try:
|
||||||
|
user_id = yield self.store.get_user_from_renewal_token(renewal_token)
|
||||||
|
except StoreError:
|
||||||
|
defer.returnValue(False)
|
||||||
|
|
||||||
logger.debug("Renewing an account for user %s", user_id)
|
logger.debug("Renewing an account for user %s", user_id)
|
||||||
yield self.renew_account_for_user(user_id)
|
yield self.renew_account_for_user(user_id)
|
||||||
|
|
||||||
|
defer.returnValue(True)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
|
def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
|
||||||
"""Renews the account attached to a given user by pushing back the
|
"""Renews the account attached to a given user by pushing back the
|
||||||
@@ -246,4 +262,4 @@ class AccountValidityHandler(object):
|
|||||||
user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
|
user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(expiration_ts)
|
return expiration_ts
|
||||||
|
|||||||
@@ -100,4 +100,4 @@ class AcmeHandler(object):
|
|||||||
logger.exception("Failed saving!")
|
logger.exception("Failed saving!")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
defer.returnValue(True)
|
return True
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user