Compare commits
281 commits
anoa/remov...hawkowl/fs
| SHA1 |
|---|
| 36e6be9294 |
| e5eddea699 |
| 0b6fbb28a8 |
| e9906b0772 |
| f218705d2a |
| 2546f32b90 |
| 9d9cf3583b |
| 2bec3a4953 |
| 3de6cc245f |
| 156a461cbd |
| c9456193d3 |
| fb86217553 |
| 41546f946e |
| a7f0161276 |
| 107ad133fc |
| b15a4ab405 |
| af9f1c0764 |
| d1b5b055be |
| edeae53221 |
| c32d359094 |
| bf4db42920 |
| 977fa4a717 |
| 6881f21f3e |
| 8ed9e63432 |
| d55bc4a8bf |
| 5d018d23f0 |
| 93fd3cbc7a |
| 3c076c79c5 |
| a8f40a8302 |
| 55a0c98d16 |
| 0b36decfb6 |
| da378af445 |
| d2e3d5b9db |
| 76a58fdcce |
| 58af30a6c7 |
| 0f632f3a57 |
| ad167c3849 |
| f25f638c35 |
| 3ff3dfe5a3 |
| f4a30d286f |
| bc35503528 |
| a4a9ded4d0 |
| e5a0224837 |
| dc4d74e44a |
| c5288e9984 |
| 2e697d3013 |
| 0eefb76fa1 |
| cf89266b98 |
| 02735e140f |
| f31d4cb7a2 |
| 72167fb394 |
| 58a755cdc3 |
| 8fde611a8c |
| 8f15832950 |
| 9fe6ad5fef |
| fe2f2fc530 |
| 6be336c0d8 |
| 3b7a35a59a |
| a9bcae9f50 |
| d4f91e7e9f |
| 4037d3220a |
| 123c04daa7 |
| 62a2d60d72 |
| 958d69f300 |
| 15056ca208 |
| f92d05e254 |
| 7a48d0bab8 |
| b4d5ff0af7 |
| e23ab7f41a |
| 1ec7d656dd |
| 458e51df7a |
| 63eb4a1b62 |
| 8c97f6414c |
| 5c3eecc70f |
| 4e97eb89e5 |
| 448bcfd0f9 |
| e6a6c4fbab |
| c9964ba600 |
| 865077f1d1 |
| aecae8f397 |
| 7c8c3b8437 |
| 3e013b7c8e |
| 2a12d76646 |
| 97a8b4caf7 |
| df3a5db629 |
| 85b0bd8fe0 |
| 105e7f6ed3 |
| 3b476f5767 |
| d94916852f |
| 84c6ea1af8 |
| 45df38e61b |
| fa87004bc1 |
| bd083a5fcf |
| 244953be3f |
| 08352d44f8 |
| d74595e2ca |
| 1a93daf353 |
| 97bf307755 |
| 992333b995 |
| 8b16696b24 |
| dde6ea7ff6 |
| 2e9cf7dda5 |
| 14c24c9037 |
| a0ee2ec458 |
| d1020653fc |
| 1f8bae7724 |
| 1cad8d7b6f |
| 0f2ecb961e |
| 26d742fed6 |
| 70e18cee00 |
| b1605cdd23 |
| 618bd1ee76 |
| f16aa3a44b |
| c0a1301ccd |
| baf081cd3b |
| 2d573e2e2b |
| 2276936bac |
| f30a71a67b |
| cf2972c818 |
| c159803067 |
| 0c4a99607e |
| 62921fb53e |
| 32768e96d4 |
| 418635e68a |
| adcd5368b0 |
| 73bbaf2bc6 |
| 3641784e8c |
| 65afc535a6 |
| 4806651744 |
| fadfde9aaa |
| 18a466b84e |
| 3db1377b26 |
| 841b12867e |
| 73bf452666 |
| 22d2338ace |
| 1883223a01 |
| 4f6984aa88 |
| cda4460d99 |
| 39e594b765 |
| cf0006719d |
| b2a629ef49 |
| d9ea9881d2 |
| c96322c8d2 |
| 0d0f6d12bc |
| 17c27df6ea |
| 80cfad233e |
| 720d30469f |
| 79f689e6c2 |
| c560b791e1 |
| 8e513e7afc |
| 22e862304a |
| 0cb72812f9 |
| f477ce4b1a |
| 66f5ff72fd |
| 2017369f7d |
| 8b0d5b171e |
| 5ea773c505 |
| 54437c48ca |
| f337d2f0f0 |
| 0fd171770a |
| 826e6ec3bd |
| f99554b15d |
| dc7cf81267 |
| f214bff0c0 |
| dcca56baba |
| c7095be913 |
| 7704873cb8 |
| d7bd9651bc |
| 5c07c97c09 |
| 7b8bc61834 |
| ced4fdaa84 |
| 2410335507 |
| bd2e1a2aa8 |
| ebc5ed1296 |
| 5c05ae7ba0 |
| b73ce4ba81 |
| 356ed0438e |
| 6a85cb5ef7 |
| a3e40bd5b4 |
| cfc00068bd |
| dd2851d576 |
| 10523241d8 |
| 82345bc09a |
| 7ad1d76356 |
| b2a382efdb |
| 89c885909a |
| 8e1ada9e6f |
| 059d8c1a4e |
| c618a5d348 |
| 6de09e07a6 |
| fa8271c5ac |
| 9c70a02a9c |
| 1def298119 |
| 2091c91fde |
| 375162b3c3 |
| 65c5592b8e |
| c831c5b2bb |
| 5ed7853bb0 |
| f44354e17f |
| d0d479c1af |
| 03cc8c4b5d |
| eca4f5ac73 |
| 1b2067f53d |
| e8c53b07f2 |
| c8f35d8d38 |
| fdefb9e29a |
| 37b524f971 |
| 823e13ddf4 |
| 18c516698e |
| d86321300a |
| d336b51331 |
| 5f158ec039 |
| db0a50bc40 |
| 24aa0e0a5b |
| 4c17a87606 |
| d445b3ae57 |
| 59f15309ca |
| f369164761 |
| 6bb0357c94 |
| a83577d64f |
| 39e9839a04 |
| 78a1cd36b5 |
| 0a4001eba1 |
| 38a6d3eea7 |
| 1890cfcf82 |
| 8ab3444fdf |
| 953dbb7980 |
| b2a2e96ea6 |
| 351d9bd317 |
| f77e997619 |
| f281714583 |
| 3dd61d12cd |
| 4d122d295c |
| 65434da75d |
| 7b3bc755a3 |
| d88421ab03 |
| af67c7c1de |
| 824707383b |
| 73cb716b3c |
| 5e01e9ac19 |
| f3615a8aa5 |
| 7556851665 |
| 43d175d17a |
| b70e080b59 |
| 57eacee4f4 |
| c142e5d16a |
| 4b1f7febc7 |
| f9e99f9534 |
| 1af2fcd492 |
| f05c7d62bc |
| 1a807dfe68 |
| 589d43d9cd |
| 9b1b79f3f5 |
| ad8b909ce9 |
| 80cc82a445 |
| b4f5416dd9 |
| eadb13d2e9 |
| 7f0d8e4288 |
| 9ccea16d45 |
| a6a776f3d8 |
| 9481707a52 |
| 0e5434264f |
| 1ee268d33d |
| ee91ac179c |
| 822a0f0435 |
| 54283f3ed4 |
| 20332b278d |
| c061d4f237 |
| f6608a8805 |
| 426854e7bc |
| 463b072b12 |
| d0b849c86d |
| cb8d568cf9 |
| 463d5a8fde |
| 91753cae59 |
| c7b48bd42d |
| 0ee9076ffe |
| 10fe904d88 |
| 9f3c0a8556 |
| 65dd5543f6 |
| 8ee69f299c |
@@ -49,14 +49,15 @@ steps:
   - command:
-      - "python -m pip install tox"
+      - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
+      - "python3.5 -m pip install tox"
       - "tox -e py35-old,codecov"
     label: ":python: 3.5 / SQLite / Old Deps"
     env:
       TRIAL_FLAGS: "-j 2"
     plugins:
       - docker#v3.0.1:
-          image: "python:3.5"
+          image: "ubuntu:xenial"  # We use xenial to get an old sqlite and python
           propagate-environment: true
     retry:
       automatic:

@@ -117,8 +118,10 @@ steps:
           limit: 2

   - label: ":python: 3.5 / :postgres: 9.5"
+    agents:
+      queue: "medium"
     env:
-      TRIAL_FLAGS: "-j 4"
+      TRIAL_FLAGS: "-j 8"
     command:
       - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
     plugins:

@@ -134,8 +137,10 @@ steps:
           limit: 2

   - label: ":python: 3.7 / :postgres: 9.5"
+    agents:
+      queue: "medium"
     env:
-      TRIAL_FLAGS: "-j 4"
+      TRIAL_FLAGS: "-j 8"
     command:
       - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
     plugins:

@@ -151,8 +156,10 @@ steps:
           limit: 2

   - label: ":python: 3.7 / :postgres: 11"
+    agents:
+      queue: "medium"
     env:
-      TRIAL_FLAGS: "-j 4"
+      TRIAL_FLAGS: "-j 8"
     command:
       - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
     plugins:

@@ -173,11 +180,13 @@ steps:
       queue: "medium"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1

@@ -192,11 +201,13 @@ steps:
       POSTGRES: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1

@@ -210,14 +221,17 @@ steps:
     env:
       POSTGRES: "1"
       WORKERS: "1"
+      BLACKLIST: "synapse-blacklist-with-workers"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
           soft_fail: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1
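The tox-based steps above can, in principle, be reproduced locally, assuming a matching interpreter is available on the host (a sketch, not an exact replica of the CI containers):

```bash
python3.5 -m pip install tox     # same bootstrap as the updated CI step
tox -e py35-old,codecov          # run the "old deps" environment locally
```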
@@ -1,145 +0,0 @@
-#!/bin/bash
-#
-# Fetch sytest, and then run the tests for synapse. The entrypoint for the
-# sytest-synapse docker images.
-
-set -ex
-
-if [ -n "$BUILDKITE" ]
-then
-    SYNAPSE_DIR=`pwd`
-else
-    SYNAPSE_DIR="/src"
-fi
-
-# Attempt to find a sytest to use.
-# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
-if [ -d "/sytest" ]; then
-    # If the user has mounted in a SyTest checkout, use that.
-    echo "Using local sytests..."
-
-    # create ourselves a working directory and dos2unix some scripts therein
-    mkdir -p /work/jenkins
-    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
-        dos2unix -n "/sytest/$i" "/work/$i"
-    done
-    ln -sf /sytest/tests /work
-    ln -sf /sytest/keys /work
-    SYTEST_LIB="/sytest/lib"
-else
-    if [ -n "$BUILDKITE_BRANCH" ]
-    then
-        branch_name=$BUILDKITE_BRANCH
-    else
-        # Otherwise, try to find out which branch the Synapse checkout is using. Fall back to develop if it's not a branch.
-        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
-    fi
-
-    # Try to fetch the branch
-    echo "Trying to get same-named sytest branch..."
-    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
-        # Probably a 404, fall back to develop
-        echo "Using develop instead..."
-        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
-    }
-
-    mkdir -p /work
-    tar -C /work --strip-components=1 -xf sytest.tar.gz
-    SYTEST_LIB="/work/lib"
-fi
-
-cd /work
-
-# PostgreSQL setup
-if [ -n "$POSTGRES" ]
-then
-    export PGUSER=postgres
-    export POSTGRES_DB_1=pg1
-    export POSTGRES_DB_2=pg2
-
-    # Start the database
-    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
-
-    # Use the Jenkins script to write out the configuration for a PostgreSQL using Synapse
-    jenkins/prep_sytest_for_postgres.sh
-
-    # Make the test databases for the two Synapse servers that will be spun up
-    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
-    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
-fi
-
-if [ -n "$OFFLINE" ]; then
-    # if we're in offline mode, just put synapse into the virtualenv, and
-    # hope that the deps are up-to-date.
-    #
-    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
-    # so we just run setup.py explicitly.)
-    #
-    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
-else
-    # We've already created the virtualenv, but lets double check we have all
-    # deps.
-    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
-    /venv/bin/pip install -q --upgrade --no-cache-dir \
-        lxml psycopg2 coverage codecov tap.py
-
-    # Make sure all Perl deps are installed -- this is done in the docker build
-    # so will only install packages added since the last Docker build
-    ./install-deps.pl
-fi
-
-# Run the tests
->&2 echo "+++ Running tests"
-
-RUN_TESTS=(
-    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
-)
-
-TEST_STATUS=0
-
-if [ -n "$WORKERS" ]; then
-    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
-else
-    RUN_TESTS+=(-I Synapse)
-fi
-
-"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
-
-if [ $TEST_STATUS -ne 0 ]; then
-    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
-else
-    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
-fi
-
->&2 echo "--- Copying assets"
-
-# Copy out the logs
-mkdir -p /logs
-cp results.tap /logs/results.tap
-rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
-
-# Upload coverage to codecov and upload files, if running on Buildkite
-if [ -n "$BUILDKITE" ]
-then
-    /venv/bin/coverage combine || true
-    /venv/bin/coverage xml || true
-    /venv/bin/codecov -X gcov -f coverage.xml
-
-    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
-    tar xvf buildkite.tar.gz
-    chmod +x ./buildkite-agent
-
-    # Upload the files
-    ./buildkite-agent artifact upload "/logs/**/*.log*"
-    ./buildkite-agent artifact upload "/logs/results.tap"
-
-    if [ $TEST_STATUS -ne 0 ]; then
-        # Annotate, if failure
-        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
-    fi
-fi
-
-exit $TEST_STATUS
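The deleted script's `RUN_TESTS` idiom is worth noting: it accumulates the test command in a bash array and expands it with `"${RUN_TESTS[@]}"`, so each argument survives word-splitting intact. A minimal sketch of the same pattern, with placeholder paths and flags:

```bash
#!/bin/bash
# Build a command incrementally as an array; quoting "${cmd[@]}" keeps each
# element a single argument even when it contains spaces.
cmd=(perl -I "/path with spaces/lib" ./run-tests.pl --all)
if [ -n "$WORKERS" ]; then
    cmd+=(-I Synapse::ViaHaproxy)   # extra module only in worker mode
else
    cmd+=(-I Synapse)
fi
"${cmd[@]}" "$@" > results.tap || status=$?
exit "${status:-0}"
```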
.buildkite/worker-blacklist (new file, 30 lines)

@@ -0,0 +1,30 @@
+# This file serves as a blacklist for SyTest tests that we expect will fail in
+# Synapse when run under worker mode. For more details, see sytest-blacklist.
+
+Message history can be paginated
+
+Can re-join room if re-invited
+
+/upgrade creates a new room
+
+The only membership state included in an initial sync is for all the senders in the timeline
+
+Local device key changes get to remote servers
+
+If remote user leaves room we no longer receive device updates
+
+Forgotten room messages cannot be paginated
+
+Inbound federation can get public room list
+
+Members from the gap are included in gappy incr LL sync
+
+Leaves are present in non-gapped incremental syncs
+
+Old leaves are present in gapped incremental syncs
+
+User sees updates to presence from other users in the incremental sync.
+
+Gapped incremental syncs include all state changes
+
+Old members are included in gappy incr LL sync if they start speaking
@@ -1,5 +1,4 @@
-comment:
-  layout: "diff"
+comment: off

 coverage:
   status:
.gitignore (vendored, 1 changed line)

@@ -16,6 +16,7 @@ _trial_temp*/
 /*.log
 /*.log.config
 /*.pid
+/.python-version
 /*.signing.key
 /env/
 /homeserver*.yaml
CHANGES.md (152 changed lines)

@@ -1,3 +1,155 @@
+Synapse 1.2.1 (2019-07-26)
+==========================
+
+Security update
+---------------
+
+This release includes *four* security fixes:
+
+- Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767))
+- Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961))
+- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @dylangerdaly for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744))
+- Fix a vulnerability where a federated server could spoof read-receipts from
+  users on other servers. Thanks to @dylangerdaly for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743))
+
+Additionally, the following fix was in Synapse **1.2.0**, but was not correctly
+identified during the original release:
+
+- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
+
+Synapse 1.2.0 (2019-07-25)
+==========================
+
+No significant changes.
+
+
+Synapse 1.2.0rc2 (2019-07-24)
+=============================
+
+Bugfixes
+--------
+
+- Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. ([\#5734](https://github.com/matrix-org/synapse/issues/5734))
+
+
+Synapse 1.2.0rc1 (2019-07-22)
+=============================
+
+Security fixes
+--------------
+
+This update included a security fix which was initially incorrectly flagged as
+a regular bug fix.
+
+- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
+
+Features
+--------
+
+- Add support for opentracing. ([\#5544](https://github.com/matrix-org/synapse/issues/5544), [\#5712](https://github.com/matrix-org/synapse/issues/5712))
+- Add ability to pull all locally stored events out of synapse that a particular user can see. ([\#5589](https://github.com/matrix-org/synapse/issues/5589))
+- Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. ([\#5597](https://github.com/matrix-org/synapse/issues/5597))
+- Add `sender` and `origin_server_ts` fields to `m.replace`. ([\#5613](https://github.com/matrix-org/synapse/issues/5613))
+- Add default push rule to ignore reactions. ([\#5623](https://github.com/matrix-org/synapse/issues/5623))
+- Include the original event when asking for its relations. ([\#5626](https://github.com/matrix-org/synapse/issues/5626))
+- Implement `session_lifetime` configuration option, after which access tokens will expire. ([\#5660](https://github.com/matrix-org/synapse/issues/5660))
+- Return "This account has been deactivated" when a deactivated user tries to login. ([\#5674](https://github.com/matrix-org/synapse/issues/5674))
+- Enable aggregations support by default ([\#5714](https://github.com/matrix-org/synapse/issues/5714))
+
+
+Bugfixes
+--------
+
+- Fix 'utime went backwards' errors on daemonization. ([\#5609](https://github.com/matrix-org/synapse/issues/5609))
+- Various minor fixes to the federation request rate limiter. ([\#5621](https://github.com/matrix-org/synapse/issues/5621))
+- Forbid viewing relations on an event once it has been redacted. ([\#5629](https://github.com/matrix-org/synapse/issues/5629))
+- Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. ([\#5638](https://github.com/matrix-org/synapse/issues/5638))
+- Fix newly-registered users not being able to lookup their own profile without joining a room. ([\#5644](https://github.com/matrix-org/synapse/issues/5644))
+- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654))
+- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658))
+- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707))
+
+
+Updates to the Docker image
+---------------------------
+
+- Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). ([\#5619](https://github.com/matrix-org/synapse/issues/5619))
+- Add missing space in default logging file format generated by the Docker image. ([\#5620](https://github.com/matrix-org/synapse/issues/5620))
+
+
+Improved Documentation
+----------------------
+
+- Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! ([\#5397](https://github.com/matrix-org/synapse/issues/5397))
+- --no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. ([\#5651](https://github.com/matrix-org/synapse/issues/5651))
+- Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! ([\#5661](https://github.com/matrix-org/synapse/issues/5661))
+- Minor tweaks to postgres documentation. ([\#5675](https://github.com/matrix-org/synapse/issues/5675))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for the `invite_3pid_guest` configuration setting. ([\#5625](https://github.com/matrix-org/synapse/issues/5625))
+
+
+Internal Changes
+----------------
+
+- Move logging code out of `synapse.util` and into `synapse.logging`. ([\#5606](https://github.com/matrix-org/synapse/issues/5606), [\#5617](https://github.com/matrix-org/synapse/issues/5617))
+- Add a blacklist file to the repo to blacklist certain sytests from failing CI. ([\#5611](https://github.com/matrix-org/synapse/issues/5611))
+- Make runtime errors surrounding password reset emails much clearer. ([\#5616](https://github.com/matrix-org/synapse/issues/5616))
+- Remove dead code for persisting outgoing federation transactions. ([\#5622](https://github.com/matrix-org/synapse/issues/5622))
+- Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. ([\#5627](https://github.com/matrix-org/synapse/issues/5627))
+- Move RegistrationHandler.get_or_create_user to test code. ([\#5628](https://github.com/matrix-org/synapse/issues/5628))
+- Add some more common python virtual-environment paths to the black exclusion list. ([\#5630](https://github.com/matrix-org/synapse/issues/5630))
+- Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. ([\#5636](https://github.com/matrix-org/synapse/issues/5636))
+- Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637))
+- Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639))
+- Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640))
+- Remove access-token support from `RegistrationHandler.register`, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641))
+- Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642))
+- Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643))
+- Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645))
+- Fix a small typo in a code comment. ([\#5655](https://github.com/matrix-org/synapse/issues/5655))
+- Clean up exception handling around client access tokens. ([\#5656](https://github.com/matrix-org/synapse/issues/5656))
+- Add a mechanism for per-test homeserver configuration in the unit tests. ([\#5657](https://github.com/matrix-org/synapse/issues/5657))
+- Inline issue_access_token. ([\#5659](https://github.com/matrix-org/synapse/issues/5659))
+- Update the sytest BuildKite configuration to checkout Synapse in `/src`. ([\#5664](https://github.com/matrix-org/synapse/issues/5664))
+- Add a `docker` type to the towncrier configuration. ([\#5673](https://github.com/matrix-org/synapse/issues/5673))
+- Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. ([\#5689](https://github.com/matrix-org/synapse/issues/5689))
+- Documentation for opentracing. ([\#5703](https://github.com/matrix-org/synapse/issues/5703))
+
+
 Synapse 1.1.0 (2019-07-04)
 ==========================

 As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
 See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.

 This release also deprecates the use of environment variables to configure the
 docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
 for more details.

 No changes since 1.1.0rc2.


 Synapse 1.1.0rc2 (2019-07-03)
 =============================

 Bugfixes
 --------

 - Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
 - Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
 - Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))


 Internal Changes
 ----------------

 - Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))


 Synapse 1.1.0rc1 (2019-07-02)
 =============================
@@ -30,11 +30,10 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
-<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
-Buildkite builds need to be authorised by a maintainer. If your change breaks
-the build, this will be shown in GitHub, so please keep an eye on the pull
-request for feedback.
+We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
+continuous integration. Buildkite builds need to be authorised by a
+maintainer. If your change breaks the build, this will be shown in GitHub, so
+please keep an eye on the pull request for feedback.

 To run unit tests in a local development environment, you can use:

@@ -70,13 +69,21 @@ All changes, even minor ones, need a corresponding changelog / newsfragment
 entry. These are managed by Towncrier
 (https://github.com/hawkowl/towncrier).

-To create a changelog entry, make a new file in the ``changelog.d``
-file named in the format of ``PRnumber.type``. The type can be
-one of ``feature``, ``bugfix``, ``removal`` (also used for
-deprecations), or ``misc`` (for internal-only changes).
+To create a changelog entry, make a new file in the ``changelog.d`` directory
+named in the format of ``PRnumber.type``. The type can be one of the following:

-The content of the file is your changelog entry, which can contain Markdown
-formatting. The entry should end with a full stop ('.') for consistency.
+* ``feature``.
+* ``bugfix``.
+* ``docker`` (for updates to the Docker image).
+* ``doc`` (for updates to the documentation).
+* ``removal`` (also used for deprecations).
+* ``misc`` (for internal-only changes).
+
+The content of the file is your changelog entry, which should be a short
+description of your change in the same style as the rest of our `changelog
+<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
+contain Markdown formatting, and should end with a full stop ('.') for
+consistency.

 Adding credits to the changelog is encouraged, we value your
 contributions and would like to have you shouted out in the release notes!
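To make the new changelog instructions concrete, an entry is just a one-line file; a minimal sketch (the PR number and wording here are placeholders):

```bash
# For a hypothetical PR #1234 that fixes a bug:
echo "Fix frobnication of widgets." > changelog.d/1234.bugfix
```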
@@ -33,6 +33,7 @@ exclude Dockerfile
 exclude .dockerignore
 exclude test_postgresql.sh
 exclude .editorconfig
+exclude sytest-blacklist

 include pyproject.toml
 recursive-include changelog.d *
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::

     virtualenv -p python3 env
     source env/bin/activate
-    python -m pip install --no-pep-517 -e .[all]
+    python -m pip install --no-use-pep517 -e .[all]

 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
@@ -49,6 +49,13 @@ returned by the Client-Server API:
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

+Upgrading to v1.2.0
+===================
+
+Some counter metrics have been renamed, with the old names deprecated. See
+`the metrics documentation <docs/metrics-howto.rst#renaming-of-metrics--deprecation-of-old-names-in-12>`_
+for details.
+
 Upgrading to v1.1.0
 ===================

@@ -1 +0,0 @@
-Update github templates.
changelog.d/5678.removal (new file, 1 line)
+Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.

changelog.d/5686.feature (new file, 1 line)
+Use `M_USER_DEACTIVATED` instead of `M_UNKNOWN` for errcode when a deactivated user attempts to login.

changelog.d/5693.bugfix (new file, 1 line)
+Fix UISIs during homeserver outage.

changelog.d/5694.misc (new file, 1 line)
+Make Jaeger fully configurable.

changelog.d/5695.misc (new file, 1 line)
+Add precautionary measures to prevent future abuse of `window.opener` in default welcome page.

changelog.d/5706.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5713.misc (new file, 1 line)
+Improve caching when fetching `get_filtered_current_state_ids`.

changelog.d/5715.misc (new file, 1 line)
+Don't accept opentracing data from clients.

changelog.d/5717.misc (new file, 1 line)
+Speed up PostgreSQL unit tests in CI.

changelog.d/5719.misc (new file, 1 line)
+Update the coding style document.

changelog.d/5720.misc (new file, 1 line)
+Improve database query performance when recording retry intervals for remote hosts.

changelog.d/5722.misc (new file, 1 line)
+Add a set of opentracing utils.

changelog.d/5724.bugfix (new file, 1 line)
+Fix stack overflow in server key lookup code.

changelog.d/5725.bugfix (new file, 1 line)
+start.sh no longer uses deprecated cli option.

changelog.d/5729.removal (new file, 1 line)
+Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.

changelog.d/5730.misc (new file, 1 line)
+Cache result of get_version_string to reduce overhead of `/version` federation requests.

changelog.d/5731.misc (new file, 1 line)
+Return 'user_type' in admin API user endpoints results.

changelog.d/5732.feature (new file, 1 line)
+Add sd_notify hooks to ease systemd integration and allow usage of Type=Notify.

changelog.d/5733.misc (new file, 1 line)
+Don't package the sytest test blacklist file.

changelog.d/5736.misc (new file, 1 line)
+Replace uses of returnValue with plain return, as returnValue is not needed on Python 3.

changelog.d/5738.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5740.misc (new file, 1 line)
+Blacklist some flakey tests in worker mode.

changelog.d/5743.bugfix (new file, 1 line)
+Log when we receive an event receipt from an unexpected origin.

changelog.d/5746.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5749.misc (new file, 1 line)
+Fix some error cases in the caching layer.

changelog.d/5750.misc (new file, 1 line)
+Add a prometheus metric for pending cache lookups.

changelog.d/5752.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5753.misc (new file, 1 line)
+Stop trying to fetch events with event_id=None.

changelog.d/5754.feature (new file, 1 line)
+Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead.

changelog.d/5768.misc (new file, 1 line)
+Convert RedactionTestCase to modern test style.

changelog.d/5770.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5774.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5775.bugfix (new file, 1 line)
+Fix debian packaging scripts to correctly build sid packages.

changelog.d/5780.misc (new file, 1 line)
+Allow looping calls to be given arguments.

changelog.d/5782.removal (new file, 1 line)
+Remove non-functional 'expire_access_token' setting.

changelog.d/5783.feature (new file, 1 line)
+Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers.

changelog.d/5785.misc (new file, 1 line)
+Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO.

changelog.d/5787.misc (new file, 1 line)
+Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests.

changelog.d/5788.bugfix (new file, 1 line)
+Correctly handle redactions of redactions.

changelog.d/5789.bugfix (new file, 1 line)
+Fix UISIs during homeserver outage.

changelog.d/5790.misc (new file, 1 line)
+Remove some spurious exceptions from the logs where we failed to talk to a remote server.

changelog.d/5792.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5793.misc (new file, 1 line)
+Reduce database IO usage by optimising queries for current membership.

changelog.d/5794.misc (new file, 1 line)
+Improve performance when making `.well-known` requests by sharing the SSL options between requests.

changelog.d/5796.misc (new file, 1 line)
+Disable codecov GitHub comments on PRs.

changelog.d/5798.bugfix (new file, 1 line)
+Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions.

changelog.d/5801.misc (new file, 1 line)
+Don't allow clients to send tombstone events that reference the room it's sent in.

changelog.d/5802.misc (new file, 1 line)
+Deny redactions of events sent in a different room.

changelog.d/5804.bugfix (new file, 1 line)
+Fix check that tombstone is a state event in push rules.

changelog.d/5805.misc (new file, 1 line)
+Deny sending well known state types as non-state events.

changelog.d/5806.bugfix (new file, 1 line)
+Fix error when trying to login as a deactivated user when using a worker to handle login.

changelog.d/5807.feature (new file, 1 line)
+Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature.

changelog.d/5808.misc (new file, 1 line)
+Handle incorrectly encoded query params correctly by returning a 400.

changelog.d/5810.misc (new file, 1 line)
+Return 502 not 500 when failing to reach any remote server.

changelog.d/5825.bugfix (new file, 1 line)
+Fix bug where user `/sync` stream could get wedged in rare circumstances.

changelog.d/5826.misc (new file, 1 line)
+Reduce global pauses in the events stream caused by expensive state resolution during persistence.

changelog.d/5836.misc (new file, 1 line)
+Add a lower bound to well-known lookup cache time to avoid repeated lookups.

changelog.d/5839.bugfix (new file, 1 line)
+The purge_remote_media.sh script was fixed.

changelog.d/5843.misc (new file, 1 line)
+Whitelist history visibility sytests in worker mode tests.
@@ -1,7 +1,7 @@
-# Example log_config file for synapse. To enable, point `log_config` to it in
+# Example log_config file for synapse. To enable, point `log_config` to it in
 # `homeserver.yaml`, and restart synapse.
 #
-# This configuration will produce similar results to the defaults within
+# This configuration will produce similar results to the defaults within
 # synapse, but can be edited to give more flexibility.

 version: 1

@@ -12,7 +12,7 @@ formatters:

 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""

 handlers:

@@ -35,7 +35,7 @@ handlers:
 root:
     level: INFO
     handlers: [console] # to use file handler instead, switch to [file]

 loggers:
     synapse:
         level: INFO
@@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid

 from synapse.app.homeserver import SynapseHomeServer

-# from synapse.util.logutils import log_function
+# from synapse.logging.utils import log_function

 from twisted.internet import reactor, defer
 from twisted.python import log
@@ -51,4 +51,4 @@ TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id
 # finally start pruning media:
 ###############################################################################
 set -x # for debugging the generated string
-curl --header "Authorization: Bearer $TOKEN" -v POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
+curl --header "Authorization: Bearer $TOKEN" -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
@@ -4,7 +4,8 @@ After=matrix-synapse.service
 BindsTo=matrix-synapse.service

 [Service]
-Type=simple
+Type=notify
+NotifyAccess=main
 User=matrix-synapse
 WorkingDirectory=/var/lib/matrix-synapse
 EnvironmentFile=/etc/default/matrix-synapse

@@ -2,7 +2,8 @@
 Description=Synapse Matrix Homeserver

 [Service]
-Type=simple
+Type=notify
+NotifyAccess=main
 User=matrix-synapse
 WorkingDirectory=/var/lib/matrix-synapse
 EnvironmentFile=/etc/default/matrix-synapse
@@ -8,7 +8,7 @@ formatters:

 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""

 handlers:
@@ -14,7 +14,9 @@
 Description=Synapse Matrix homeserver

 [Service]
-Type=simple
+Type=notify
+NotifyAccess=main
 ExecReload=/bin/kill -HUP $MAINPID
 Restart=on-abort

 User=synapse
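The switch from `Type=simple` to `Type=notify` in these units changes the startup contract: systemd now waits for an explicit readiness notification instead of assuming the service is up as soon as it starts. A minimal sketch of that contract (illustrative only; Synapse itself sends the signal internally via its new sd_notify hooks):

```bash
#!/bin/bash
# Under Type=notify, the unit stays in "activating" until READY=1 arrives on
# $NOTIFY_SOCKET; systemd-notify is the shell-level way to send that signal.
sleep 2                  # placeholder for real initialisation work
systemd-notify --ready   # tell systemd the service is now up
```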
debian/changelog (vendored, 26 changed lines)

@@ -1,9 +1,31 @@
-matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.2.1) stable; urgency=medium
+
+  * New synapse release 1.2.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 26 Jul 2019 11:32:47 +0100
+
+matrix-synapse-py3 (1.2.0) stable; urgency=medium

   [ Amber Brown ]
   * Update logging config defaults to match API changes in Synapse.

   [ Richard van der Hoff ]
   * Add Recommends and Depends for some libraries which you probably want.

+  [ Synapse Packaging team ]
+  * New synapse release 1.2.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Jul 2019 14:10:07 +0100
+
+matrix-synapse-py3 (1.1.0) stable; urgency=medium
+
   [ Silke Hofstra ]
   * Include systemd-python to allow logging to the systemd journal.

- -- Silke Hofstra <silke@slxh.eu>  Wed, 29 May 2019 09:45:29 +0200
+  [ Synapse Packaging team ]
+  * New synapse release 1.1.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 04 Jul 2019 11:43:41 +0100

 matrix-synapse-py3 (1.0.0) stable; urgency=medium
debian/control (vendored, 7 changed lines)

@@ -2,16 +2,20 @@ Source: matrix-synapse-py3
 Section: contrib/python
 Priority: extra
 Maintainer: Synapse Packaging team <packages@matrix.org>
+# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
 Build-Depends:
  debhelper (>= 9),
  dh-systemd,
  dh-virtualenv (>= 1.1),
+ libsystemd-dev,
+ libpq-dev,
+ lsb-release,
  python3-dev,
  python3,
  python3-setuptools,
  python3-pip,
  python3-venv,
  libsqlite3-dev,
  tar,
 Standards-Version: 3.9.8
 Homepage: https://github.com/matrix-org/synapse

@@ -28,9 +32,12 @@ Depends:
  debconf,
  python3-distutils|libpython3-stdlib (<< 3.6),
  ${misc:Depends},
+ ${shlibs:Depends},
  ${synapse:pydepends},
 # some of our scripts use perl, but none of them are important,
 # so we put perl:Depends in Suggests rather than Depends.
+Recommends:
+ ${shlibs1:Recommends},
 Suggests:
  sqlite3,
  ${perl:Depends},
debian/log.yaml (vendored, 2 changed lines)

@@ -7,7 +7,7 @@ formatters:

 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""

 handlers:
debian/rules (vendored, 14 changed lines)

@@ -3,15 +3,29 @@
 # Build Debian package using https://github.com/spotify/dh-virtualenv
 #

+# assume we only have one package
+PACKAGE_NAME:=`dh_listpackages`
+
 override_dh_systemd_enable:
 	dh_systemd_enable --name=matrix-synapse

 override_dh_installinit:
 	dh_installinit --name=matrix-synapse

+# we don't really want to strip the symbols from our object files.
 override_dh_strip:

 override_dh_shlibdeps:
+	# make the postgres package's dependencies a recommendation
+	# rather than a hard dependency.
+	find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
+	    xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
+	        -pshlibs1 -dRecommends
+
+	# all the other dependencies can be normal 'Depends' requirements,
+	# except for PIL's, which is self-contained and which confuses
+	# dpkg-shlibdeps.
+	dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2

 override_dh_virtualenv:
 	./debian/build_virtualenv
@@ -29,7 +29,7 @@ for port in 8080 8081 8082; do

     if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
         printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config

         echo 'enable_registration: true' >> $DIR/etc/$port.config

         # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't

@@ -43,7 +43,7 @@ for port in 8080 8081 8082; do
                 tls: true
                 resources:
                   - names: [client, federation]

               - port: $port
                 tls: false
                 bind_addresses: ['::1', '127.0.0.1']

@@ -68,7 +68,7 @@ for port in 8080 8081 8082; do

     # Generate tls keys
     openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"

     # Ignore keys from the trusted keys server
     echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
     echo 'trusted_key_servers:' >> $DIR/etc/$port.config

@@ -120,7 +120,6 @@ for port in 8080 8081 8082; do
         python3 -m synapse.app.homeserver \
             --config-path "$DIR/etc/$port.config" \
             -D \
-            -vv \

     popd
 done
@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
 ###
 ### Stage 0: builder
 ###
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder

 # install the OS build deps

@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
 ### Stage 1: runtime
 ###

-FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.10

 # xmlsec is required for saml support
 RUN apk add --no-cache --virtual .runtime_deps \
@@ -42,7 +42,15 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
 ###
 FROM ${distro}

+# Get the distro we want to pull from as a dynamic build variable
+# (We need to define it in each build stage)
+ARG distro=""
+ENV distro ${distro}
+
 # Install the build dependencies
+#
+# NB: keep this list in sync with the list of build-deps in debian/control
+# TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
   && env DEBIAN_FRONTEND=noninteractive apt-get install \
   -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
@@ -4,7 +4,8 @@

 set -ex

-DIST=`lsb_release -c -s`
+# Get the codename from distro env
+DIST=`cut -d ':' -f2 <<< $distro`

 # we get a read-only copy of the source: make a writeable copy
 cp -aT /synapse/source /synapse/build
@@ -207,22 +207,3 @@ perspectives:

 password_config:
    enabled: true
-
-{% if SYNAPSE_SMTP_HOST %}
-email:
-   enable_notifs: false
-   smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
-   smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
-   smtp_user: "{{ SYNAPSE_SMTP_USER }}"
-   smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
-   require_transport_security: False
-   notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
-   app_name: Matrix
-   # if template_dir is unset, uses the example templates that are part of
-   # the Synapse distribution.
-   #template_dir: res/templates
-   notif_template_html: notif_mail.html
-   notif_template_text: notif_mail.txt
-   notif_for_new_users: True
-   riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
-{% endif %}
@@ -2,11 +2,11 @@ version: 1

 formatters:
   precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
+   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""

 handlers:
||||
@@ -1,4 +1,8 @@
|
||||
# Code Style
|
||||
Code Style
|
||||
==========
|
||||
|
||||
Formatting tools
|
||||
----------------
|
||||
|
||||
The Synapse codebase uses a number of code formatting tools in order to
|
||||
quickly and automatically check for formatting (and sometimes logical) errors
|
||||
@@ -6,20 +10,20 @@ in code.
|
||||
|
||||
The necessary tools are detailed below.
|
||||
|
||||
## Formatting tools
|
||||
- **black**
|
||||
|
||||
The Synapse codebase uses [black](https://pypi.org/project/black/) as an
|
||||
opinionated code formatter, ensuring all comitted code is properly
|
||||
formatted.
|
||||
The Synapse codebase uses `black <https://pypi.org/project/black/>`_ as an
|
||||
opinionated code formatter, ensuring all comitted code is properly
|
||||
formatted.
|
||||
|
||||
First install ``black`` with::
|
||||
First install ``black`` with::
|
||||
|
||||
pip install --upgrade black
|
||||
pip install --upgrade black
|
||||
|
||||
Have ``black`` auto-format your code (it shouldn't change any
|
||||
functionality) with::
|
||||
Have ``black`` auto-format your code (it shouldn't change any functionality)
|
||||
with::
|
||||
|
||||
black . --exclude="\.tox|build|env"
|
||||
black . --exclude="\.tox|build|env"
|
||||
|
||||
- **flake8**
|
||||
|
||||
@@ -54,17 +58,16 @@ functionality is supported in your editor for a more convenient development
|
||||
workflow. It is not, however, recommended to run ``flake8`` on save as it
|
||||
takes a while and is very resource intensive.
|
||||
|
||||
## General rules
|
||||
General rules
|
||||
-------------
|
||||
|
||||
- **Naming**:
|
||||
|
||||
- Use camel case for class and type names
|
||||
- Use underscores for functions and variables.
|
||||
|
||||
- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.
|
||||
|
||||
- **Comments**: should follow the `google code style
|
||||
<http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
|
||||
- **Docstrings**: should follow the `google code style
|
||||
<https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings>`_.
|
||||
This is so that we can generate documentation with `sphinx
|
||||
<http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
|
||||
`examples
|
||||
@@ -73,6 +76,8 @@ takes a while and is very resource intensive.
|
||||
|
||||
- **Imports**:
|
||||
|
||||
- Imports should be sorted by ``isort`` as described above.
|
||||
|
||||
- Prefer to import classes and functions rather than packages or modules.
|
||||
|
||||
Example::
|
||||
@@ -92,25 +97,84 @@ takes a while and is very resource intensive.
|
||||
This goes against the advice in the Google style guide, but it means that
|
||||
errors in the name are caught early (at import time).
|
||||
|
||||
- Multiple imports from the same package can be combined onto one line::
|
||||
|
||||
from synapse.types import GroupID, RoomID, UserID
|
||||
|
||||
An effort should be made to keep the individual imports in alphabetical
|
||||
order.
|
||||
|
||||
If the list becomes long, wrap it with parentheses and split it over
|
||||
multiple lines.
|
||||
|
||||
- As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
|
||||
imports should be grouped in the following order, with a blank line between
|
||||
each group:
|
||||
|
||||
1. standard library imports
|
||||
2. related third party imports
|
||||
3. local application/library specific imports
|
||||
|
||||
- Imports within each group should be sorted alphabetically by module name.
|
||||
|
||||
- Avoid wildcard imports (``from synapse.types import *``) and relative
|
||||
imports (``from .types import UserID``).
|
||||
|
||||
Configuration file format
-------------------------

The `sample configuration file <./sample_config.yaml>`_ acts as a reference to
Synapse's configuration options for server administrators. Remember that many
readers will be unfamiliar with YAML and server administration in general, so
it is important that the file be as easy to understand as possible, which
includes following a consistent format.

Some guidelines follow:
|
||||
* Sections should be separated with a heading consisting of a single line
|
||||
prefixed and suffixed with ``##``. There should be **two** blank lines
|
||||
before the section header, and **one** after.
|
||||
|
||||
* Each option should be listed in the file with the following format:
|
||||
|
||||
* A comment describing the setting. Each line of this comment should be
|
||||
prefixed with a hash (``#``) and a space.
|
||||
|
||||
The comment should describe the default behaviour (ie, what happens if
|
||||
the setting is omitted), as well as what the effect will be if the
|
||||
setting is changed.
|
||||
|
||||
Often, the comment end with something like "uncomment the
|
||||
following to \<do action>".
|
||||
|
||||
* A line consisting of only ``#``.
|
||||
|
||||
* A commented-out example setting, prefixed with only ``#``.
|
||||
|
||||
For boolean (on/off) options, convention is that this example should be
|
||||
the *opposite* to the default (so the comment will end with "Uncomment
|
||||
the following to enable [or disable] \<feature\>." For other options,
|
||||
the example should give some non-default value which is likely to be
|
||||
useful to the reader.
|
||||
|
||||
* There should be a blank line between each option.
|
||||
|
||||
* Where several settings are grouped into a single dict, *avoid* the
|
||||
convention where the whole block is commented out, resulting in comment
|
||||
lines starting ``# #``, as this is hard to read and confusing to
|
||||
edit. Instead, leave the top-level config option uncommented, and follow
|
||||
the conventions above for sub-options. Ensure that your code correctly
|
||||
handles the top-level option being set to ``None`` (as it will be if no
|
||||
sub-options are enabled).
|
||||
|
||||
* Lines should be wrapped at 80 characters.
|
||||
|
||||
Example::
|
||||
|
||||
## Frobnication ##
|
||||
|
||||
# The frobnicator will ensure that all requests are fully frobnicated.
|
||||
# To enable it, uncomment the following.
|
||||
#
|
||||
#frobnicator_enabled: true
|
||||
|
||||
# By default, the frobnicator will frobnicate with the default frobber.
|
||||
# The following will make it use an alternative frobber.
|
||||
#
|
||||
#frobincator_frobber: special_frobber
|
||||
|
||||
# Settings for the frobber
|
||||
#
|
||||
frobber:
|
||||
# frobbing speed. Defaults to 1.
|
||||
#
|
||||
#speed: 10
|
||||
|
||||
# frobbing distance. Defaults to 1000.
|
||||
#
|
||||
#distance: 100
|
||||
|
||||
Note that the sample configuration is generated from the synapse code and is
|
||||
maintained by a script, ``scripts-dev/generate_sample_config``. Making sure
|
||||
that the output from this script matches the desired format is left as an
|
||||
exercise for the reader!
|
||||
|
||||
332
docs/federation_side_bus.md
Normal file
@@ -0,0 +1,332 @@
%%%
title = "The Federation Side Bus"
abbrev = "federation-side-bus"
docName = "federation-side-bus"
ipr = "none"
workgroup = "Synapse"

[seriesInfo]
name = "RFC"
stream = "IETF"
status = "informational"
value = "federation-side-bus"

[pi]
toc = "yes"
topblock = "yes"

[[author]]
initials = "A."
surname = "Brown"
fullname = "Amber Brown"
organization = "New Vector"
  [author.address]
  email = "amberb@matrix.org"
%%%

.# Abstract

Proposal for the "Federation Side Bus" project: a refactoring of the federation transport code and of other externally communicating code, the implementation of a message-bus style system for external communication, and the implementation of a prioritisation system that ranks remote hosts by liveliness and prioritises outgoing requests when experiencing backpressure.

{mainmatter}

# Introduction

On smaller machines, Synapse has problems when interacting with the federation in large rooms. Existing experience had pointed at state resolution as the performance killer, but further research with small homeservers has revealed performance problems when communicating with many servers. The roughly linear cost of having more servers in a room turns into a significant cliff in the realm of 200 or more servers on low-powered hardware, causing a "meltdown" and cascading failures as the server's non-responsiveness causes timeouts for clients and other servers.

## Terminology

The keywords **MUST**, **MUST NOT**, **REQUIRED**, **SHALL**, **SHALL NOT**, **SHOULD**, **SHOULD NOT**, **RECOMMENDED**, **MAY**, and **OPTIONAL**, when they appear in this document, are to be interpreted as described in [@!RFC2119].

Additionally, the key words "**MIGHT**", "**COULD**", "**MAY WISH TO**", "**WOULD PROBABLY**", "**SHOULD CONSIDER**", and "**MUST (BUT WE KNOW YOU WON'T)**" in this document are to be interpreted as described in [@!RFC6919].

The keywords **PDU**, **EDU**, and **QUERY** in this document are to be interpreted as described in the Matrix Server-to-Server Specification [@!s2sapi].

**EXTERNAL COMMUNICATION** is defined as outgoing communication with another logical service, such as a web server or chat bot. Communication with the configured database, the filesystem, or with workers is not included in this definition.

**FEDERATION REQUESTS** are defined as any HTTP API call in the Matrix Server-to-Server specification, including PDUs, EDUs, or queries.

**DEFERRED** can mean either the literal Twisted Deferred, or a native coroutine that can await Deferreds. Which is used in the code depends on its use of native coroutines. APIs SHOULD try to implement native coroutines where possible, but they are described as "Deferreds" for brevity.

# The Status Quo

Synapse currently performs poorly in the following situations:

- Joining a room with many servers, where the presence storm can cause Synapse to lock up and time out the room join to the client, making it seem as if joining the room "failed" when it did not
- Sending a message in a room with many servers (sending PDUs), which causes CPU and RAM spikes
- Presence and typing in a room with many servers (sending EDUs), which causes CPU and RAM spikes
- Viewing the user list of a large room, causing many concurrent profile fetches
- Many users doing queries about remote users

This can be attributed to the following fundamental issues:

- Synapse's use of the network is unintelligent and is not aware of resource constraints (e.g. connection limits),
- Synapse does not leverage persistent network connections and pipelining/HTTP 2.0,
- Synapse does not gracefully degrade under pressure, but instead shows total system failure.

In addition, the following issues make it more difficult to fix the above without a comprehensive approach:

- Synapse does not assign a priority to hosts, meaning that any naive rate limiting (such as the transaction queue on `atleastfornow.net`) can cause a poor user experience, as misbehaving or timing-out hosts can take up a slot that a well-behaved server, or a server whose users are being actively communicated with, could use
- Synapse does not assign a priority to requests, making a rudimentary rate limiting system difficult
- Large parts of the codebase can make external requests independently
- Synapse uses conventional HTTP clients that are poorly suited to the "message bus" style of usage that is required.

For large servers with workers, this can be mitigated somewhat by just throwing more hardware at the problem. For smaller ones, especially on constrained hardware (think ARM or shared hosting), this lack of rate limiting can cause hard spinning, swamping of resources, and total system failure.

## Current External Communication

Currently, Synapse talks to other servers in the following places:

- Keyring (perspectives and origin), for fetching server keys
- TransportLayerClient:
  - s.federation.federation_client
    - general federation queries
    - client key query
    - user device query
    - backfill
    - individual PDU collection
    - fetching remote room state
    - sending joins
    - sending invites
    - sending leaves
    - getting public rooms
    - querying auth chain
    - getting missing events
    - room complexity
  - s.federation.sender.transaction_manager
    - sending transactions
  - s.groups.attestations
    - fetching attestations
  - s.groups.groups_server
    - inviting and removing from group
  - s.groups.groups_client
    - fetching groups
    - fetching users from groups
- Media Repo
  - download_remote_file (linearised)
- Appservices
- Identity services

TODO: More detail?

Furthermore, profiles and room directory use the general query API.

# Proposing The Federation Side Bus

The Federation Side Bus project remodels how Synapse approaches external communication. It draws naming parallels with the system bus design of personal computers and servers from the 1990s and 2000s, where the "front side bus" described the communication interface between the CPU and its I/O systems (and the "back side bus" sat between the CPU and its cache).

The core of the proposal is the definition of the "southbridge" (named for the I/O controller hub on a computer's FSB). The Southbridge is the only place where external communication is allowed to occur, and it has a small but versatile interface for invoking said communication. This abstraction allows the Southbridge to be more intelligent about the use of network resources, as it can control all outbound data.

There are also additional abstractions, and reworkings of existing ones, to make the internal logic more consistent. This is mostly focused on the reorganisation of the Federation code and the shifting of the Media Repo logic out of the REST servlets into handlers of its own. A reworking of ".well-known" resolution, as well as hostname resolution in general, is also proposed, with the end goal of increasing reliability and reducing the amount of code that needs to consider SRV/.well-known solving.

The Federation Side Bus will not alter Synapse's interaction with any of the Matrix standards, but will provide the foundation for the future implementation of transports other than HTTP. HTTP/1.1 over TLS is targeted as the primary transport for Federation in this proposal, although HTTP/2.0 can be considered a "stretch goal", desirable for its multiplexing and long-lived-connection qualities that would further reduce resource usage.

# Architecture

## The Southbridge

The Southbridge fully encapsulates all external communication (apart from DNS resolution). It consists of a number of queues, connection pools, and associated prioritisation and batching systems.

### Initial Federation Queue

A zero-length queue that routes Federation requests through to the Host Ranker.

### Host Ranker

Tracks the performance of outbound requests and routes new requests through the different queues based on the Matrix host.

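As a rough sketch of what host ranking could look like (hypothetical names and
thresholds; the real design would track far more signals):

```python
import time
from collections import defaultdict

class HostRanker:
    """Tracks recent request outcomes per Matrix host and buckets hosts
    into 'live', 'slow', and 'dead' queues."""

    def __init__(self):
        self._failures = defaultdict(int)
        self._last_success = {}

    def record_success(self, host):
        self._failures[host] = 0
        self._last_success[host] = time.monotonic()

    def record_failure(self, host):
        self._failures[host] += 1

    def rank(self, host):
        if self._failures[host] >= 5:
            return "dead"
        # Hosts with no recent success (or none at all) are deprioritised.
        if time.monotonic() - self._last_success.get(host, 0) > 600:
            return "slow"
        return "live"
```
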
### Priority-Aware Federation Queue

A queue that enqueues events based on the Matrix host and requests a connection from the pool. When it has acquired a connection, it sends the events it has. If there is network pressure, the queue is responsible for giving up the connection based on a deadline. It is aware of federation semantics, and can intelligently collapse or discard EDUs or queries.

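For instance, EDU collapsing might look something like this (a hypothetical
sketch; typing and presence EDUs are the obvious candidates, since only the
latest state matters):

```python
def collapse_edus(edus):
    """Keep only the newest EDU of each collapsible type per room/user,
    preserving the ordering of everything else."""
    COLLAPSIBLE = {"m.typing", "m.presence"}
    latest = {}
    out = []
    for edu in edus:  # edus are dicts with "edu_type" and "content"
        key = (edu["edu_type"], edu["content"].get("room_id"),
               edu["content"].get("user_id"))
        if edu["edu_type"] in COLLAPSIBLE:
            latest[key] = edu  # later EDUs overwrite earlier ones
        else:
            out.append(edu)
    return out + list(latest.values())
```
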
### The Request Queue

Holds HTTP requests and requests a connection from the connection pool to send them on. Used for general purpose queries (for example, .well-known lookups or URL previews).

### The Connection Pool

Holds open HTTP connections and is responsible for establishing new ones. Operates on a callback basis with the queues: it hands over a connection to the queue requesting it, and is told when the queue is done with it. It assigns deadlines for the queues to follow (e.g. time spent processing) to ensure fairness.

## Federation Subsystem

The Federation Subsystem sees a number of changes, mostly revolving around refactoring the existing code and formalising interfaces.

### Federation Resolver

Translates a Matrix homeserver hostname into the "real" addresses that it can be contacted on. It is considered authoritative by the rest of the system.

### Externaliser

Queues a Federation request in the Message Queue after attaching the "real address" information.

### Controllers

A shifting of Federation logic into more logically separated modules, such as separating by purpose (messages, queries, presence, etc.) for clarity.

## Media Subsystem

The functionality of the Media Repository REST APIs, refactored into a handler.

## DNS Resolver

Resolves domain names to DNS records. Although informally implemented in Synapse already, this new subsystem would centralise much of the functionality of the various DNS resolvers currently in use.

# Implementation Plan

The implementation plan has three phases -- cleanup, plumbing, and optimising.

Cleanup focuses on shifting existing code about to fit the new model better. This involves implementing the Federation Resolver and cleaning up the media APIs.

Plumbing involves laying the groundwork for the changes. This involves writing a more controllable HTTP client, implementing the queueing and connection pool, and hooking them up to the existing Federation abstraction. The development of other queues and pools (for URL previews, well-known lookups, etc.) will also be done here, although it can be done concurrently.

Optimising involves using these abstractions to allow Synapse to operate with network activity restrictions. This includes adding rate limiting and EDU collapsing.

## Decouple the Media APIs from the REST APIs

This should all be moved out into a handler of its own, instead of living in the REST APIs.

## Implement the Federation Resolver

The base of the Federation Resolver can be implemented and placed in Synapse without much disruption.

Requirements:

- A ResolvedFederationAddress object which can encapsulate the results.
- A simple, one-function-call API to fetch the information about the "real host".
  - Inputs:
    - Matrix server name.
  - Outputs:
    - Hostname to verify the TLS certificate against (which might not be the Matrix server name if .well-known is in use)
    - A list of IP addresses to contact the Matrix service by. This SHOULD contain priority and weight data to allow the connection pool to connect to preferred hosts, but MAY just be ordered in rank of preference without any priority or weight information.
    - These MUST be encapsulated in a ResolvedFederationAddress object.

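A sketch of the shape this could take (hypothetical names, using attrs as the
rest of the codebase does):

```python
import attr

@attr.s(frozen=True)
class ResolvedFederationAddress(object):
    server_name = attr.ib()   # e.g. "example.com"
    tls_hostname = attr.ib()  # hostname to verify the certificate against
    addresses = attr.ib()     # [(ip, port, priority, weight), ...]

def resolve(server_name):
    """One-call API: .well-known, then SRV, then A/AAAA lookup.
    Returns a Deferred firing with a ResolvedFederationAddress."""
    ...
```
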
## Implement the HTTP/1.1 Transport

Implement this in the current MatrixFederationAgent and SimpleHTTPClient, with a basic connection pool.

The justification for this is that the current HTTP client libraries rely on controlling the connection itself, while we want to operate on a lower level and control the connection ourselves, giving it to the client instead. It represents an inversion of the concerns, which is why we have to provide this part ourselves.

This is not a large ask, as the h11 library implements all the logic (and is a much more solid HTTP state machine than Twisted's current HTTP Agent implementation). If it implements IAgent, we may wish to contribute this upstream to Twisted.

Requirements:

- An HTTP/1.1 compliant transport.
  - SHOULD utilise the h11 library.
  - MUST support HTTP/1.1 keep-alive, but MUST NOT send multiple requests at once (pipelining).
  - SHOULD implement Twisted's IAgent/IResponse interface.
  - MUST take a TCP connection as an argument. The client MUST NOT instantiate the connection itself.
- A basic connection pool.
  - MUST implement a method to request a connection from the ResolvedFederationAddress object that returns a Deferred resolving to the TCP connection.
  - MAY use the first IP listed in the ResolvedFederationAddress (matching current behaviour).
  - MUST verify that the TLS certificate matches the hostname in the ResolvedFederationAddress when the connection is made.
  - MUST return an error to the connection requests if the TLS connection fails.
  - SHOULD keep connections around until they time out, and serve them to subsequent requests if they are alive.
  - SHOULD NOT implement any form of rate limiting, as that will be implemented later.
- MatrixFederationClient MUST use this connection pool and transport in place of treq.
  - MatrixFederationClient MUST query the Federation Resolver for the ResolvedFederationAddress to use.
  - Users of the MatrixFederationClient MUST NOT call the FederationResolver before making the request.

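To illustrate the "bring your own connection" style that h11 enables (a rough
sketch of the idea, not the final interface -- in practice the reads and
writes are driven by the Twisted protocol that owns the socket):

```python
import h11

def request_bytes(host, target):
    """Serialise a GET request; the caller writes these bytes to the
    TCP/TLS connection it already owns."""
    conn = h11.Connection(our_role=h11.CLIENT)
    data = conn.send(h11.Request(method="GET", target=target,
                                 headers=[("Host", host)]))
    data += conn.send(h11.EndOfMessage())
    return conn, data

def feed(conn, received):
    """Feed bytes read from the socket; yield parsed h11 events."""
    conn.receive_data(received)
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA:
            break
        yield event  # h11.Response, h11.Data, h11.EndOfMessage
```
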
Questions:

- Do we need to support HTTP/1.0?
  - I don't think it's realistically required, and it would be expensive. The specification lists "HTTP/1.1" specifically in the examples, but does not call out HTTP/1.1 as the minimum supported version.

## Implement the Federation Queue

Implement the Federation Queue API. This Queue is not used at this stage.

Requirements:

- FederationResponse object
  - MUST be the root interface for the purposes of typing.
  - MUST have a common "status code" attribute with the numerical code and description.
- FederationErrorResponse object
  - MUST implement FederationResponse
  - MUST have errcode and error from the JSON body as attributes, and all other keys in an 'other' mapping.
- FederationQueryResponse object
  - MUST implement FederationResponse
  - MAY have further subclasses that implement particular responses to queries.
  - MUST have the JSON response as an attribute.
- FederationTransactionResponse object
  - MUST implement FederationResponse
  - MUST have the PDU processing results as an attribute.

- OutgoingEDU object
  - MUST have edu_type and content as attributes.
  - MUST have the time that it was created.
- OutgoingPDU object
  - MUST have a content attribute which contains the PDU data.
- OutgoingQuery object
  - MUST have a template of the path.
  - MUST NOT add query or body parameters to the path.
  - MUST store the path, query, and JSON body arguments.
  - MUST implement a method that returns the fully resolved path with query arguments and the body as a dictionary, for consumption by the Queue.
  - MAY have subclasses that provide more usable instantiators for particular queries.

- The base FederationQueue
  - MUST request a connection from the ConnectionPool to send requests.
  - MUST return the connection to the Connection Pool when it has sent its requests.
  - MUST NOT send more requests than were initially in its queue when the connection was granted from the Pool.
  - MUST create a HTTP Transport for its uses. It MUST destroy it after the connection is returned.
  - MUST be able to encode JSON bodies and create requests.
  - MUST be able to create a transaction from the EDUs/PDUs in the queue when it has a connection.
  - MAY collapse EDUs based on their time of creation or "cancelling out".
  - MAY remove EDUs from the queue when under queue pressure.
  - SHOULD send PDUs and EDUs in the order they were given. Future implementations MAY prioritise certain PDUs over others (e.g. direct messages).
  - MUST remove EDUs/PDUs that have been sent in a transaction from the queue.
  - MUST remove queries that have been given a response from the queue.
  - MAY retry queries that fail with transient errors instead of delivering the real error to the querier.
  - MUST remove queries from the queue that have passed their wall-clock timeout and return a FederationErrorResponse, even if they have not been sent.
- An API to add an EDU/PDU onto the Queue.
  - MUST require a ResolvedFederationAddress.
  - MUST take an OutgoingPDU or OutgoingEDU object.
- An API to make a Federation query.
  - MUST require a ResolvedFederationAddress.
  - MUST list an acceptable timeout. This MAY be 0 to mean that the query should be retried forever.
  - MUST return a Deferred that fires with a FederationResponse.

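A sketch of the OutgoingQuery shape implied above (hypothetical, attrs-style):

```python
import attr
from urllib.parse import urlencode

@attr.s
class OutgoingQuery(object):
    path_template = attr.ib()  # e.g. "/_matrix/federation/v1/query/{query_type}"
    path_args = attr.ib()      # e.g. {"query_type": "profile"}
    query_args = attr.ib()     # e.g. {"user_id": "@alice:example.com"}
    json_body = attr.ib(default=None)

    def resolve(self):
        """Return the fully resolved path (with query string) and the
        body, for consumption by the Queue."""
        path = self.path_template.format(**self.path_args)
        if self.query_args:
            path += "?" + urlencode(self.query_args)
        return path, self.json_body
```
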
Questions:

- Typing on interfaces -- there's a mypy zope.interface plugin?
- What to do with backpressure on down hosts? Do we discard the queue?

## Handle Transactions and Queries via the Federation Queue

Move the FederationSender code to use the Federation Queue.

Requirements:

- Externaliser
  - Takes Queries/EDUs/PDUs, queries the Federation Resolver for the real host information, and then forwards them to the queue.
- Synapse MUST instantiate the Externaliser, the Federation Queue, and the Federation Connection Pool on startup.
- synapse.federation.sender.FederationSender MUST send events to the Externaliser.
- ... more words here...

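The Externaliser's job is thin by design; roughly (a hypothetical sketch):

```python
from twisted.internet import defer

class Externaliser(object):
    def __init__(self, resolver, queue):
        self._resolver = resolver  # the Federation Resolver
        self._queue = queue        # the Federation Queue

    @defer.inlineCallbacks
    def send(self, destination, item):
        # Attach the "real address" information, then hand off.
        address = yield self._resolver.resolve(destination)
        self._queue.enqueue(address, item)
```
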
## Implement the General Purpose Queue

A Queue that takes general HTTP requests and forwards them to a pool.

## Handle General Purpose External Communication via the General Purpose Queue

Move the URL previewer and well-known lookup to use the General Purpose Queue.

## Implement Queuing and Pooling for Pushers, Appservices, and Identity Servers

Questions:

- Is this really needed? Fitting in with the existing abstraction is useful, even if it will never rate limit the pool, and we'll get the benefits of the smarter connection pooling.

## TODO: Lay out the optimising section

{backmatter}

<reference anchor='s2sapi' target='https://matrix.org/docs/spec/server_server/latest'>
  <front>
    <title>Federation API</title>
    <author>
      <organization>Matrix.org Foundation C.I.C.</organization>
    </author>
    <date year='2019'/>
  </front>
</reference>

@@ -1,4 +1,4 @@
-Log contexts
+Log Contexts
 ============

.. contents::
@@ -12,7 +12,7 @@ record.
Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.

-The ``synapse.util.logcontext`` module provides facilities for managing the
+The ``synapse.logging.context`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).

Deferreds make the whole thing complicated, so this document describes how it
@@ -27,19 +27,19 @@ found them:

.. code:: python

-    from synapse.util import logcontext  # omitted from future snippets
+    from synapse.logging import context  # omitted from future snippets

    def handle_request(request_id):
-        request_context = logcontext.LoggingContext()
+        request_context = context.LoggingContext()

-        calling_context = logcontext.LoggingContext.current_context()
-        logcontext.LoggingContext.set_current_context(request_context)
+        calling_context = context.LoggingContext.current_context()
+        context.LoggingContext.set_current_context(request_context)
        try:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")
        finally:
-            logcontext.LoggingContext.set_current_context(calling_context)
+            context.LoggingContext.set_current_context(calling_context)

    def do_request_handling():
        logger.debug("phew")  # this will be logged against request_id
@@ -51,7 +51,7 @@ written much more succinctly as:

.. code:: python

    def handle_request(request_id):
-        with logcontext.LoggingContext() as request_context:
+        with context.LoggingContext() as request_context:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")
@@ -74,7 +74,7 @@ blocking operation, and returns a deferred:

    @defer.inlineCallbacks
    def handle_request(request_id):
-        with logcontext.LoggingContext() as request_context:
+        with context.LoggingContext() as request_context:
            request_context.request = request_id
            yield do_request_handling()
            logger.debug("finished")
@@ -148,7 +148,7 @@ call any other functions.
        d = more_stuff()
        result = yield d  # also fine, of course

-        defer.returnValue(result)
+        return result

    def nonInlineCallbacksFun():
        logger.debug("just a wrapper really")
@@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.

The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
-``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
+``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:

@@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with
This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.

-You can also use ``logcontext.make_deferred_yieldable``, which just does the
+You can also use ``context.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:

.. code:: python

    def sleep(seconds):
-        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
+        return context.make_deferred_yieldable(get_sleep_deferred(seconds))


Fire-and-forget
@@ -279,7 +279,7 @@ Obviously that option means that the operations done in
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

-The second option is to use ``logcontext.run_in_background``, which wraps a
+The second option is to use ``context.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
@@ -293,7 +293,7 @@ It can be used like this:

    def do_request_handling():
        yield foreground_operation()

-        logcontext.run_in_background(background_operation)
+        context.run_in_background(background_operation)

        # this will now be logged against the request context
        logger.debug("Request handling complete")
@@ -332,7 +332,7 @@ gathered:

        result = yield defer.gatherResults([d1, d2])

In this case particularly, though, option two, of using
-``logcontext.preserve_fn`` almost certainly makes more sense, so that
+``context.preserve_fn`` almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:

@@ -340,8 +340,8 @@ logcontext. This looks like:

    @defer.inlineCallbacks
    def do_request_handling():
-        d1 = logcontext.preserve_fn(operation1)()
-        d2 = logcontext.preserve_fn(operation2)()
+        d1 = context.preserve_fn(operation1)()
+        d2 = context.preserve_fn(operation2)()

        with PreserveLoggingContext():
            result = yield defer.gatherResults([d1, d2])
@@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it:

.. code:: python

    def handle_request(request_id):
-        with logcontext.LoggingContext() as request_context:
+        with context.LoggingContext() as request_context:
            request_context.request = request_id
            d = do_request_handling()

@@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy.

The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
-``logcontext._PreservingContextDeferred``:
+``context._PreservingContextDeferred``:

.. code:: python

@@ -59,6 +59,108 @@ How to monitor Synapse metrics using Prometheus
    Restart Prometheus.


Renaming of metrics & deprecation of old names in 1.2
-----------------------------------------------------

Synapse 1.2 updates the Prometheus metrics to match the naming convention of the
upstream ``prometheus_client``. The old names are considered deprecated and will
be removed in a future version of Synapse.

+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| New Name | Old Name |
+=============================================================================+=======================================================================+
| python_gc_objects_collected_total | python_gc_objects_collected |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| python_gc_collections_total | python_gc_collections |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| process_cpu_seconds_total | process_cpu_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_start_count_total | synapse_background_process_start_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_server_received_edus_total | synapse_federation_server_received_edus |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_notifier_notified_events_total | synapse_notifier_notified_events |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+

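For dashboards and alert rules that still reference the old names, a small
mapping like the following may help with migration (an illustrative snippet,
not something Synapse ships):

.. code-block:: python

    RENAMED = {
        "synapse_federation_client_sent_transactions":
            "synapse_federation_client_sent_transactions_total",
        "synapse_federation_server_received_pdus":
            "synapse_federation_server_received_pdus_total",
        # ... and so on for the rest of the table above.
    }

    def modernise(promql):
        """Rewrite old metric names in a PromQL expression to new ones.
        Assumes the expression only uses the old names."""
        for old, new in RENAMED.items():
            promql = promql.replace(old, new)
        return promql
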
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
----------------------------------------------------------------------------------

100
docs/opentracing.rst
Normal file
@@ -0,0 +1,100 @@
===========
OpenTracing
===========

Background
----------

OpenTracing is a semi-standard being adopted by a number of distributed tracing
platforms. It is a common API for facilitating vendor-agnostic tracing
instrumentation. That is, we can use the OpenTracing API and select one of a
number of tracer implementations to do the heavy lifting in the background.
Our currently selected implementation is Jaeger.

OpenTracing is a tool which gives an insight into the causal relationship of
work done in and between servers. The servers each track events and report them
to a centralised server - in Synapse's case: Jaeger. The basic unit used to
represent events is the span. The span roughly represents a single piece of work
that was done and the time at which it occurred. A span can have child spans,
meaning that the work of the child had to be completed for the parent span to
complete, or it can have follow-on spans which represent work that is undertaken
as a result of the parent but is not depended on by the parent in order to
finish.

Since this is undertaken in a distributed environment, a request to another
server, such as an RPC or a simple GET, can be considered a span (a unit of
work) for the local server. This causal link is what OpenTracing aims to
capture and visualise. In order to do this, metadata about the local server's
span, i.e. the 'span context', needs to be included with the request to the
remote.

It is up to the remote server to decide what it does with the spans
it creates. This is called the sampling policy and it can be configured
through Jaeger's settings.

For OpenTracing concepts see
https://opentracing.io/docs/overview/what-is-tracing/.

For more information about Jaeger's implementation see
https://www.jaegertracing.io/docs/

======================
Setting up OpenTracing
======================

To receive OpenTracing spans, start up a Jaeger server. This can be done
using docker like so:

.. code-block:: bash

   docker run -d --name jaeger \
     -p 6831:6831/udp \
     -p 6832:6832/udp \
     -p 5778:5778 \
     -p 16686:16686 \
     -p 14268:14268 \
     jaegertracing/all-in-one:1.13

Latest documentation is probably at
https://www.jaegertracing.io/docs/1.13/getting-started/

Enable OpenTracing in Synapse
-----------------------------

OpenTracing is not enabled by default. It must be enabled in the homeserver
config by uncommenting the config options under ``opentracing`` as shown in
the `sample config <./sample_config.yaml>`_. For example:

.. code-block:: yaml

   opentracing:
     tracer_enabled: true
     homeserver_whitelist:
       - "mytrustedhomeserver.org"
       - "*.myotherhomeservers.com"

Homeserver whitelisting
-----------------------

The homeserver whitelist is configured using regular expressions. A list of regular
expressions can be given and their union will be compared when propagating any
span contexts to another homeserver.

Though it's mostly safe to send and receive span contexts to and from
untrusted users, since span contexts are usually opaque IDs, it can lead to
two problems, namely:

- If the span context is marked as sampled by the sending homeserver, the receiver will
  sample it. Therefore two homeservers with wildly different sampling policies
  could incur higher sampling counts than intended.
- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety
  this has been disabled in Synapse, but that doesn't prevent another server sending
  you baggage, which will be logged in OpenTracing's logs.

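As a rough illustration of how such a whitelist check might work (a
hypothetical sketch, not Synapse's actual implementation):

.. code-block:: python

    import re

    # The union of the configured patterns is matched against the
    # remote server_name before span contexts are propagated.
    def whitelisted_homeserver(server_name, whitelist):
        pattern = re.compile("|".join("(?:%s)" % p for p in whitelist))
        return pattern.fullmatch(server_name) is not None

    assert whitelisted_homeserver(
        "mytrustedhomeserver.org", [r"mytrustedhomeserver\.org"]
    )
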
==================
Configuring Jaeger
==================

Sampling strategies can be set as in this document:
https://www.jaegertracing.io/docs/1.13/sampling/
@@ -11,7 +11,9 @@ a postgres database.

* If you are using the `matrix.org debian/ubuntu
  packages <../INSTALL.md#matrixorg-packages>`_,
-  the necessary libraries will already be installed.
+  the necessary python library will already be installed, but you will need to
+  ensure the low-level postgres library is installed, which you can do with
+  ``apt install libpq5``.

* For other pre-built packages, please consult the documentation from the
  relevant package.
@@ -34,9 +36,14 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user

   su - postgres
   createuser --pwprompt synapse_user

-The PostgreSQL database used *must* have the correct encoding set, otherwise it
-would not be able to store UTF8 strings. To create a database with the correct
-encoding use, e.g.::
+Before you can authenticate with the ``synapse_user``, you must create a
+database that it can access. To create a database, first connect to the database
+with your database user::
+
+   su - postgres
+   psql
+
+and then run::

   CREATE DATABASE synapse
    ENCODING 'UTF8'
@@ -46,7 +53,13 @@ encoding use, e.g.::
    OWNER synapse_user;

This would create an appropriate database named ``synapse`` owned by the
-``synapse_user`` user (which must already exist).
+``synapse_user`` user (which must already have been created as above).
+
+Note that the PostgreSQL database *must* have the correct encoding set (as
+shown above), otherwise it will not be able to store UTF8 strings.
+
+You may need to enable password authentication so ``synapse_user`` can connect
+to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html.

Tuning Postgres
===============

@@ -48,6 +48,8 @@ Let's assume that we expect clients to connect to our server at

       proxy_set_header X-Forwarded-For $remote_addr;
       }
   }

+Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI.
+
* Caddy::

@@ -278,6 +278,23 @@ listeners:
# Used by phonehome stats to group together related servers.
#server_context: context

+# Resource-constrained Homeserver Settings
+#
+# If limit_remote_rooms.enabled is True, the room complexity will be
+# checked before a user joins a new remote room. If it is above
+# limit_remote_rooms.complexity, it will disallow joining or
+# instantly leave.
+#
+# limit_remote_rooms.complexity_error can be set to customise the text
+# displayed to the user when a room above the complexity threshold has
+# its join cancelled.
+#
+# Uncomment the below lines to enable:
+#limit_remote_rooms:
+#  enabled: True
+#  complexity: 1.0
+#  complexity_error: "This room is too complex."
+
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
@@ -548,6 +565,13 @@ log_config: "CONFDIR/SERVERNAME.log.config"



## Media Store ##

+# Enable the media store service in the Synapse master. Uncomment the
+# following if you are using a separate media store worker.
+#
+#enable_media_repo: false
+
# Directory where uploaded images and attachments are stored.
#
media_store_path: "DATADIR/media_store"
@@ -785,6 +809,27 @@ uploads_path: "DATADIR/uploads"
#  period: 6w
#  renew_at: 1w
#  renew_email_subject: "Renew your %(app)s account"
+#  # Directory in which Synapse will try to find the HTML files to serve to the
+#  # user when trying to renew an account. Optional, defaults to
+#  # synapse/res/templates.
+#  template_dir: "res/templates"
+#  # HTML to be displayed to the user after they successfully renewed their
+#  # account. Optional.
+#  account_renewed_html_path: "account_renewed.html"
+#  # HTML to be displayed when the user tries to renew an account with an invalid
+#  # renewal token. Optional.
+#  invalid_token_html_path: "invalid_token.html"
+
+# Time that a user's session remains valid for, after they log in.
+#
+# Note that this is not currently compatible with guest logins.
+#
+# Note also that this is calculated at login time: changes are not applied
+# retrospectively to users who have already logged in.
+#
+# By default, this is infinite.
+#
+#session_lifetime: 24h

# The user must provide all of the below types of 3PID when registering.
#
@@ -914,10 +959,6 @@ uploads_path: "DATADIR/uploads"
#
# macaroon_secret_key: <PRIVATE STRING>

-# Used to enable access token expiration.
-#
-#expire_access_token: False
-
# a secret which is used to calculate HMACs for form values, to stop
# falsification of values. Must be specified for the User Consent
# forms to work.
@@ -1395,3 +1436,43 @@ password_config:
#   module: "my_custom_project.SuperRulesSet"
#   config:
#     example_option: 'things'
+
+
+## Opentracing ##
+
+# These settings enable opentracing, which implements distributed tracing.
+# This allows you to observe the causal chains of events across servers
+# including requests, key lookups etc., across any server running
+# synapse or any other services which support opentracing
+# (specifically those implemented with Jaeger).
+#
+opentracing:
+    # tracing is disabled by default. Uncomment the following line to enable it.
+    #
+    #enabled: true
+
+    # The list of homeservers we wish to send and receive span contexts and span baggage to and from.
+    # See docs/opentracing.rst
+    # This is a list of regexes which are matched against the server_name of the
+    # homeserver.
+    #
+    # By default, it is empty, so no servers are matched.
+    #
+    #homeserver_whitelist:
+    #  - ".*"
+
+    # Jaeger can be configured to sample traces at different rates.
+    # All configuration options provided by Jaeger can be set here.
+    # Jaeger's configuration is mostly related to trace sampling and
+    # is documented here:
+    # https://www.jaegertracing.io/docs/1.13/sampling/.
+    #
+    #jaeger_config:
+    #  sampler:
+    #    type: const
+    #    param: 1
+
+    # Logging whether spans were started and reported
+    #
+    # logging:
+    #   false

@@ -206,6 +206,13 @@ Handles the media repository. It can handle all endpoints starting with::

     /_matrix/media/

+And the following regular expressions matching media-specific administration
+APIs::
+
+    ^/_synapse/admin/v1/purge_media_cache$
+    ^/_synapse/admin/v1/room/.*/media$
+    ^/_synapse/admin/v1/quarantine_media/.*$
+
You should also set ``enable_media_repo: False`` in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository.

@@ -14,6 +14,11 @@
name = "Bugfixes"
showcontent = true

+[[tool.towncrier.type]]
+directory = "docker"
+name = "Updates to the Docker image"
+showcontent = true
+
[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
@@ -39,6 +44,8 @@ exclude = '''
  | \.git          # root of the project
  | \.tox
  | \.venv
+  | \.env
+  | env
  | _build
  | _trial_temp.*
  | build

12
scripts-dev/lint.sh
Executable file
@@ -0,0 +1,12 @@
#!/bin/sh
#
# Runs linting scripts over the local Synapse checkout
# isort - sorts import statements
# flake8 - lints and finds mistakes
# black - opinionated code formatter

set -e

isort -y -rc synapse tests scripts-dev scripts
flake8 synapse tests
python3 -m black synapse tests scripts-dev scripts

@@ -35,4 +35,4 @@ try:
except ImportError:
    pass

-__version__ = "1.1.0rc1"
+__version__ = "1.2.1"

@@ -25,7 +25,13 @@ from twisted.internet import defer
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, Codes, ResourceLimitError
from synapse.api.errors import (
    AuthError,
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    ResourceLimitError,
)
from synapse.config.server import is_threepid_reserved
from synapse.types import UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
@@ -63,7 +69,6 @@ class Auth(object):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401

        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        register_cache("cache", "token_cache", self.token_cache)
@@ -123,7 +128,7 @@ class Auth(object):
            )

        self._check_joined_room(member, user_id, room_id)
        defer.returnValue(member)
        return member

    @defer.inlineCallbacks
    def check_user_was_in_room(self, room_id, user_id):
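This hunk shows the mechanical migration that runs through the rest of this file: on Python 3, a generator decorated with ``@defer.inlineCallbacks`` can simply ``return value``, which Twisted treats exactly like the older ``defer.returnValue(value)``. A minimal sketch, assuming Twisted is installed::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style():
        value = yield defer.succeed(42)
        defer.returnValue(value)  # pre-Python-3 idiom being removed

    @defer.inlineCallbacks
    def new_style():
        value = yield defer.succeed(42)
        return value  # legal in generators since Python 3.3; the replacement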
@@ -151,13 +156,13 @@ class Auth(object):
            if forgot:
                raise AuthError(403, "User %s not in room %s" % (user_id, room_id))

        defer.returnValue(member)
        return member

    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
        with Measure(self.clock, "check_host_in_room"):
            latest_event_ids = yield self.store.is_host_joined(room_id, host)
            defer.returnValue(latest_event_ids)
            return latest_event_ids

    def _check_joined_room(self, member, user_id, room_id):
        if not member or member.membership != Membership.JOIN:
@@ -189,18 +194,17 @@ class Auth(object):
        Returns:
            defer.Deferred: resolves to a ``synapse.types.Requester`` object
        Raises:
            AuthError if no user by that token exists or the token is invalid.
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid.
            AuthError if access is denied for the user in the access token
        """
        # Can optionally look elsewhere in the request (e.g. headers)
        try:
            ip_addr = self.hs.get_ip_from_request(request)
            user_agent = request.requestHeaders.getRawHeaders(
                b"User-Agent", default=[b""]
            )[0].decode("ascii", "surrogateescape")

            access_token = self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            access_token = self.get_access_token_from_request(request)

            user_id, app_service = yield self._get_appservice_user_id(request)
            if user_id:
@@ -215,9 +219,7 @@ class Auth(object):
                        device_id="dummy-device",  # stubbed
                    )

                defer.returnValue(
                    synapse.types.create_requester(user_id, app_service=app_service)
                )
                return synapse.types.create_requester(user_id, app_service=app_service)

            user_info = yield self.get_user_by_access_token(access_token, rights)
            user = user_info["user"]
@@ -258,45 +260,37 @@ class Auth(object):

            request.authenticated_entity = user.to_string()

            defer.returnValue(
                synapse.types.create_requester(
                    user, token_id, is_guest, device_id, app_service=app_service
                )
            )
            return synapse.types.create_requester(
                user, token_id, is_guest, device_id, app_service=app_service
            )
        except KeyError:
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS,
                "Missing access token.",
                errcode=Codes.MISSING_TOKEN,
            )
            raise MissingClientTokenError()

    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            self.get_access_token_from_request(request)
        )
        if app_service is None:
            defer.returnValue((None, None))
            return (None, None)

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(self.hs.get_ip_from_request(request))
            if ip_address not in app_service.ip_range_whitelist:
                defer.returnValue((None, None))
                return (None, None)

        if b"user_id" not in request.args:
            defer.returnValue((app_service.sender, app_service))
            return (app_service.sender, app_service)

        user_id = request.args[b"user_id"][0].decode("utf8")
        if app_service.sender == user_id:
            defer.returnValue((app_service.sender, app_service))
            return (app_service.sender, app_service)

        if not app_service.is_interested_in_user(user_id):
            raise AuthError(403, "Application service cannot masquerade as this user.")
        if not (yield self.store.get_user_by_id(user_id)):
            raise AuthError(403, "Application service has not registered this user")
        defer.returnValue((user_id, app_service))
        return (user_id, app_service)

    @defer.inlineCallbacks
    def get_user_by_access_token(self, token, rights="access"):
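The masquerading rules in ``_get_appservice_user_id`` can be summarised as follows; this is a hedged distillation with hypothetical names, not Synapse's own helper::

    def resolve_appservice_user(sender, requested_user_id, is_interested, user_exists):
        """Decide which user_id an application service request acts as."""
        if requested_user_id is None or requested_user_id == sender:
            return sender  # the appservice acts as its own sender user
        if not is_interested:
            raise PermissionError("Application service cannot masquerade as this user.")
        if not user_exists:
            raise PermissionError("Application service has not registered this user")
        return requested_user_id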
@@ -313,14 +307,26 @@ class Auth(object):
            `token_id` (int|None): access token id. May be None if guest
            `device_id` (str|None): device corresponding to access token
        Raises:
            AuthError if no user by that token exists or the token is invalid.
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid.
        """

        if rights == "access":
            # first look in the database
            r = yield self._look_up_user_by_access_token(token)
            if r:
                defer.returnValue(r)
                valid_until_ms = r["valid_until_ms"]
                if (
                    valid_until_ms is not None
                    and valid_until_ms < self.clock.time_msec()
                ):
                    # there was a valid access token, but it has expired.
                    # soft-logout the user.
                    raise InvalidClientTokenError(
                        msg="Access token has expired", soft_logout=True
                    )

                return r

        # otherwise it needs to be a valid macaroon
        try:
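The new ``valid_until_ms`` check introduces "soft logout": an expired-but-otherwise-valid token is rejected with ``soft_logout=True``, which clients see as an ``M_UNKNOWN_TOKEN`` error carrying a ``soft_logout`` flag, so they can re-authenticate without discarding device state. A hedged standalone sketch of the decision (hypothetical helper, not Synapse's code)::

    import time

    def classify_token(valid_until_ms, now_ms=None):
        # Mirrors the hunk above: only an expired-but-known token is soft-logged-out.
        now_ms = int(time.time() * 1000) if now_ms is None else now_ms
        if valid_until_ms is not None and valid_until_ms < now_ms:
            return "soft_logout"  # Synapse raises InvalidClientTokenError(soft_logout=True)
        return "valid"

    print(classify_token(valid_until_ms=1))     # "soft_logout": expired long ago
    print(classify_token(valid_until_ms=None))  # "valid": token never expires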
@@ -331,11 +337,7 @@ class Auth(object):
            if not guest:
                # non-guest access tokens must be in the database
                logger.warning("Unrecognised access token - not in store.")
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",
                    errcode=Codes.UNKNOWN_TOKEN,
                )
                raise InvalidClientTokenError()

            # Guest access tokens are not stored in the database (there can
            # only be one access token per guest, anyway).
@@ -350,16 +352,10 @@ class Auth(object):
            # guest tokens.
            stored_user = yield self.store.get_user_by_id(user_id)
            if not stored_user:
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unknown user_id %s" % user_id,
                    errcode=Codes.UNKNOWN_TOKEN,
                )
                raise InvalidClientTokenError("Unknown user_id %s" % user_id)
            if not stored_user["is_guest"]:
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Guest access token used for regular user",
                    errcode=Codes.UNKNOWN_TOKEN,
                raise InvalidClientTokenError(
                    "Guest access token used for regular user"
                )
            ret = {
                "user": user,
@@ -378,7 +374,7 @@ class Auth(object):
                }
            else:
                raise RuntimeError("Unknown rights setting %s", rights)
            defer.returnValue(ret)
            return ret
        except (
            _InvalidMacaroonException,
            pymacaroons.exceptions.MacaroonException,
@@ -386,11 +382,7 @@ class Auth(object):
            ValueError,
        ) as e:
            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS,
                "Invalid macaroon passed.",
                errcode=Codes.UNKNOWN_TOKEN,
            )
            raise InvalidClientTokenError("Invalid macaroon passed.")

    def _parse_and_validate_macaroon(self, token, rights="access"):
        """Takes a macaroon and tries to parse and validate it. This is cached
@@ -418,25 +410,16 @@ class Auth(object):
        try:
            user_id = self.get_user_id_from_macaroon(macaroon)

            has_expiry = False
            guest = False
            for caveat in macaroon.caveats:
                if caveat.caveat_id.startswith("time "):
                    has_expiry = True
                elif caveat.caveat_id == "guest = true":
                if caveat.caveat_id == "guest = true":
                    guest = True

            self.validate_macaroon(
                macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
            )
            self.validate_macaroon(macaroon, rights, user_id=user_id)
        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS,
                "Invalid macaroon passed.",
                errcode=Codes.UNKNOWN_TOKEN,
            )
            raise InvalidClientTokenError("Invalid macaroon passed.")

        if not has_expiry and rights == "access":
        if rights == "access":
            self.token_cache[token] = (user_id, guest)

        return user_id, guest
@@ -453,19 +436,16 @@ class Auth(object):
            (str) user id

        Raises:
            AuthError if there is no user_id caveat in the macaroon
            InvalidClientCredentialsError if there is no user_id caveat in the
                macaroon
        """
        user_prefix = "user_id = "
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith(user_prefix):
                return caveat.caveat_id[len(user_prefix) :]
        raise AuthError(
            self.TOKEN_NOT_FOUND_HTTP_STATUS,
            "No user caveat in macaroon",
            errcode=Codes.UNKNOWN_TOKEN,
        )
        raise InvalidClientTokenError("No user caveat in macaroon")

    def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
    def validate_macaroon(self, macaroon, type_string, user_id):
        """
        validate that a Macaroon is understood by and was signed by this server.

@@ -473,7 +453,6 @@ class Auth(object):
            macaroon(pymacaroons.Macaroon): The macaroon to validate
            type_string(str): The kind of token required (e.g. "access",
                "delete_pusher")
            verify_expiry(bool): Whether to verify whether the macaroon has expired.
            user_id (str): The user_id required
        """
        v = pymacaroons.Verifier()
@@ -486,19 +465,7 @@ class Auth(object):
        v.satisfy_exact("type = " + type_string)
        v.satisfy_exact("user_id = %s" % user_id)
        v.satisfy_exact("guest = true")

        # verify_expiry should really always be True, but there exist access
        # tokens in the wild which expire when they should not, so we can't
        # enforce expiry yet (so we have to allow any caveat starting with
        # 'time < ' in access tokens).
        #
        # On the other hand, short-term login tokens (as used by CAS login, for
        # example) have an expiry time which we do want to enforce.

        if verify_expiry:
            v.satisfy_general(self._verify_expiry)
        else:
            v.satisfy_general(lambda c: c.startswith("time < "))
        v.satisfy_general(self._verify_expiry)

        # access_tokens include a nonce for uniqueness: any value is acceptable
        v.satisfy_general(lambda c: c.startswith("nonce = "))
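For context, ``validate_macaroon`` is a thin wrapper over ``pymacaroons``: each expected caveat is registered on a ``Verifier`` before the signature is checked. A self-contained sketch, assuming ``pymacaroons`` is installed (key and caveat values are illustrative, not Synapse's)::

    import pymacaroons

    key = b"macaroon-signing-key"  # stands in for the homeserver's secret
    m = pymacaroons.Macaroon(location="example.com", identifier="key1", key=key)
    m.add_first_party_caveat("type = access")
    m.add_first_party_caveat("user_id = @alice:example.com")
    m.add_first_party_caveat("time < 2000000000000")
    m.add_first_party_caveat("nonce = abc123")

    v = pymacaroons.Verifier()
    v.satisfy_exact("type = access")
    v.satisfy_exact("user_id = @alice:example.com")
    v.satisfy_general(lambda c: c.startswith("time < "))
    v.satisfy_general(lambda c: c.startswith("nonce = "))
    v.verify(m, key)  # raises a MacaroonException subclass if anything mismatches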
@@ -517,7 +484,7 @@ class Auth(object):
    def _look_up_user_by_access_token(self, token):
        ret = yield self.store.get_user_by_access_token(token)
        if not ret:
            defer.returnValue(None)
            return None

        # we use ret.get() below because *lots* of unit tests stub out
        # get_user_by_access_token in a way where it only returns a couple of
@@ -527,26 +494,18 @@ class Auth(object):
            "token_id": ret.get("token_id", None),
            "is_guest": False,
            "device_id": ret.get("device_id"),
            "valid_until_ms": ret.get("valid_until_ms"),
        }
        defer.returnValue(user_info)
        return user_info

    def get_appservice_by_req(self, request):
        try:
            token = self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            service = self.store.get_app_service_by_token(token)
            if not service:
                logger.warn("Unrecognised appservice access token.")
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",
                    errcode=Codes.UNKNOWN_TOKEN,
                )
            request.authenticated_entity = service.sender
            return defer.succeed(service)
        except KeyError:
            raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.")
        token = self.get_access_token_from_request(request)
        service = self.store.get_app_service_by_token(token)
        if not service:
            logger.warn("Unrecognised appservice access token.")
            raise InvalidClientTokenError()
        request.authenticated_entity = service.sender
        return defer.succeed(service)

    def is_server_admin(self, user):
        """ Check if the given user is a local server admin.
@@ -562,7 +521,7 @@ class Auth(object):
    @defer.inlineCallbacks
    def compute_auth_events(self, event, current_state_ids, for_verification=False):
        if event.type == EventTypes.Create:
            defer.returnValue([])
            return []

        auth_ids = []

@@ -623,22 +582,7 @@ class Auth(object):
            if member_event.content["membership"] == Membership.JOIN:
                auth_ids.append(member_event.event_id)

        defer.returnValue(auth_ids)

    def check_redaction(self, room_version, event, auth_events):
        """Check whether the event sender is allowed to redact the target event.

        Returns:
            True if the sender is allowed to redact the target event if the
            target event was created by them.
            False if the sender is allowed to redact the target event with no
            further checks.

        Raises:
            AuthError if the event sender is definitely not allowed to redact
            the target event.
        """
        return event_auth.check_redaction(room_version, event, auth_events)
        return auth_ids

    @defer.inlineCallbacks
    def check_can_change_room_list(self, room_id, user):
@@ -652,7 +596,7 @@ class Auth(object):

        is_admin = yield self.is_server_admin(user)
        if is_admin:
            defer.returnValue(True)
            return True

        user_id = user.to_string()
        yield self.check_joined_room(room_id, user_id)
@@ -692,20 +636,16 @@ class Auth(object):
        return bool(query_params) or bool(auth_headers)

    @staticmethod
    def get_access_token_from_request(request, token_not_found_http_status=401):
    def get_access_token_from_request(request):
        """Extracts the access_token from the request.

        Args:
            request: The http request.
            token_not_found_http_status(int): The HTTP status code to set in the
                AuthError if the token isn't found. This is used in some of the
                legacy APIs to change the status code to 403 from the default of
                401 since some of the old clients depended on auth errors returning
                403.
        Returns:
            unicode: The access_token
        Raises:
            AuthError: If there isn't an access_token in the request.
            MissingClientTokenError: If there isn't a single access_token in the
                request
        """

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
@@ -714,34 +654,20 @@ class Auth(object):
            # Try to get the access_token from an "Authorization: Bearer"
            # header
            if query_params is not None:
                raise AuthError(
                    token_not_found_http_status,
                    "Mixing Authorization headers and access_token query parameters.",
                    errcode=Codes.MISSING_TOKEN,
                raise MissingClientTokenError(
                    "Mixing Authorization headers and access_token query parameters."
                )
            if len(auth_headers) > 1:
                raise AuthError(
                    token_not_found_http_status,
                    "Too many Authorization headers.",
                    errcode=Codes.MISSING_TOKEN,
                )
                raise MissingClientTokenError("Too many Authorization headers.")
            parts = auth_headers[0].split(b" ")
            if parts[0] == b"Bearer" and len(parts) == 2:
                return parts[1].decode("ascii")
            else:
                raise AuthError(
                    token_not_found_http_status,
                    "Invalid Authorization header.",
                    errcode=Codes.MISSING_TOKEN,
                )
                raise MissingClientTokenError("Invalid Authorization header.")
        else:
            # Try to get the access_token from the query params.
            if not query_params:
                raise AuthError(
                    token_not_found_http_status,
                    "Missing access token.",
                    errcode=Codes.MISSING_TOKEN,
                )
                raise MissingClientTokenError()

            return query_params[0].decode("ascii")

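The extraction rules are now: exactly one of an ``Authorization: Bearer`` header or an ``access_token`` query parameter must be present, and every failure mode raises ``MissingClientTokenError`` (which carries its own 401 status, making the old ``token_not_found_http_status`` knob redundant). A hypothetical standalone version of the same logic, not Synapse's code::

    def extract_access_token(auth_headers, query_params):
        # Prefer a single "Authorization: Bearer <token>" header; otherwise
        # fall back to the access_token query parameter; never allow both.
        if auth_headers:
            if query_params is not None:
                raise ValueError("Mixing Authorization headers and access_token query parameters.")
            if len(auth_headers) > 1:
                raise ValueError("Too many Authorization headers.")
            parts = auth_headers[0].split(b" ")
            if parts[0] == b"Bearer" and len(parts) == 2:
                return parts[1].decode("ascii")
            raise ValueError("Invalid Authorization header.")
        if not query_params:
            raise ValueError("Missing access token.")
        return query_params[0].decode("ascii")

    print(extract_access_token([b"Bearer some_token"], None))  # "some_token"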
@@ -764,7 +690,7 @@ class Auth(object):
            # * The user is a guest user, and has joined the room
            # else it will throw.
            member_event = yield self.check_user_was_in_room(room_id, user_id)
            defer.returnValue((member_event.membership, member_event.event_id))
            return (member_event.membership, member_event.event_id)
        except AuthError:
            visibility = yield self.state.get_current_state(
                room_id, EventTypes.RoomHistoryVisibility, ""
@@ -773,7 +699,7 @@ class Auth(object):
                visibility
                and visibility.content["history_visibility"] == "world_readable"
            ):
                defer.returnValue((Membership.JOIN, None))
                return (Membership.JOIN, None)
                return
            raise AuthError(
                403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN