Compare commits
278 Commits
| SHA1 |
|---|
| ac6a0d72b2 |
| 9ac72d9543 |
| 44a4d65586 |
| 6bb1c028f1 |
| f191be822b |
| 57426ec6a3 |
| 4bc7483518 |
| 4dc945ba30 |
| 802884d4ee |
| b8e6ed36c1 |
| 6fcb25202f |
| 7a4632af9c |
| c0b6955e3b |
| c74624a633 |
| a1a6473293 |
| c4414768af |
| a712aa3a9c |
| 16565e67db |
| 40c2271680 |
| 6728bf3940 |
| 6946c20111 |
| 71669a0fba |
| 899a119c2b |
| 641c409e4e |
| 70ea2f4e1d |
| 96c408273e |
| 1330aa4a8f |
| 65f3fbfbf7 |
| 1d0f2ec812 |
| c7b333c545 |
| 69efe6fb16 |
| 108d5fb20d |
| 9c598dddcb |
| b1a90da82e |
| 16c7afa94c |
| 8aaf7ffc44 |
| 84c0a20dfe |
| 4b9e5076c4 |
| 07493607a8 |
| bd398b874e |
| e4b078a600 |
| d730c2c22b |
| 890cb048fd |
| 5b9786ee00 |
| 65d1003d01 |
| f5050e148c |
| 9342cc6ab1 |
| 21d3f82344 |
| 47a7e3928d |
| 65bf9f1119 |
| 41285ffe5b |
| 71304bfc8d |
| 59e0112209 |
| d14e94bae4 |
| b82c9cf462 |
| f2891d2487 |
| 9982c71515 |
| 0969d688e3 |
| 5d3e3c051d |
| a164134a53 |
| 1d9df51ff1 |
| e28ef831e6 |
| 80467bbac3 |
| 7b288826b7 |
| e07384c4e1 |
| e1666af9be |
| fcd6f01dc7 |
| 0abb094f1a |
| 6d65659b62 |
| 16e0680498 |
| b9d6756b14 |
| 9bccd5e472 |
| 8184ae8a09 |
| 82fca11fc1 |
| 82ca6d1f9f |
| 633e5c933b |
| 3d672fec51 |
| a06614bd2a |
| b2200a8690 |
| c88bc53903 |
| 8d98dc8ffe |
| 86920ac266 |
| dbdc565dfd |
| c594cc8076 |
| ae753fed8c |
| 5f9bdf90fe |
| c003450057 |
| 49b58f0a16 |
| 62175a20e5 |
| 1bb35e3a83 |
| bc8fa1509d |
| 1c0eb8bbb2 |
| a288bdf0b1 |
| 5a707a2f9a |
| a8626901cd |
| 32590b7139 |
| 7c70b8f8a6 |
| 107aeb6915 |
| 968a30a75c |
| 0869f01e74 |
| 2b2466f78b |
| 561eebe170 |
| 34ac75ce2c |
| 92e6fb5c89 |
| a9b5ea6fc1 |
| f8b9ca53ce |
| d154f5a055 |
| f3ab0b2390 |
| 128902d60a |
| 4cc4400b4d |
| fc2c245a1f |
| 459d3d5046 |
| d328a93b51 |
| af691e415c |
| 028267acd2 |
| d08bac4136 |
| c30f73c86a |
| 092b541401 |
| 45bb55c6de |
| 8b9ae6d3a6 |
| 94960cef03 |
| 12ae64ce0d |
| fe725f7e45 |
| e85aabb030 |
| d9713e916e |
| 04dad5ac16 |
| 2f16857ca9 |
| e07cc31cb8 |
| 68a53f825f |
| 32e54b472a |
| 915421065b |
| d1b060b492 |
| 7033b05cad |
| 9caab0c364 |
| dc5efc92a8 |
| e83a190643 |
| 41c3f21c3b |
| 91c8a7f9f4 |
| eb2b8523ae |
| 5b68e12fd8 |
| 6d02a13d81 |
| 4151111d95 |
| 6575df647d |
| 68d2869c8d |
| da95867d30 |
| bd4505f765 |
| f86b695cbd |
| af8a2f679b |
| 1895d14e12 |
| b99c532c1c |
| 02c729d6b0 |
| 02c46acc6a |
| bfcefbb230 |
| 6f47bc3fb2 |
| 8e32f26cb8 |
| cb12a37708 |
| f61b2068e6 |
| f666fe36d7 |
| bf4fd14806 |
| f830a3be2a |
| 649fe1c2be |
| f595d6ac57 |
| f311018823 |
| 4074c8b968 |
| eaf4d11af9 |
| b02465b9db |
| 00cf679bf2 |
| 06cd757ae7 |
| 0927adb012 |
| 7fc1196a36 |
| a214ba93e0 |
| 6cb415b63f |
| c6e75c9f2d |
| 3bc238629e |
| c1dfd6a18a |
| 464c301584 |
| 309f3bb322 |
| bb4fd8f927 |
| 2d0e0a4044 |
| 767686af48 |
| 2a5a15aff8 |
| e3a0300431 |
| dc70789056 |
| 93f7d2df3e |
| 6a8f902edb |
| ef2228c890 |
| 19818d66af |
| d2fa7b7e99 |
| ba3f27b69a |
| b18cd25e42 |
| cf82338930 |
| 3df8fcca25 |
| 495ea92350 |
| b2327eb9cb |
| 483ba85c7a |
| 218cc071c5 |
| 362d80b770 |
| 3c03c37883 |
| 2418b91bb7 |
| a4ce91396b |
| 32b781bfe2 |
| dfc846a316 |
| 46b8a79b3a |
| 8a2e316413 |
| 91f8cd3307 |
| 0ca2908653 |
| 4fddf8fc77 |
| 15272f837c |
| 9645728619 |
| be794c7cf7 |
| 2129dd1a02 |
| 086f6f27d4 |
| 5d27730a73 |
| 719e073f00 |
| 24b7f3916d |
| c475275926 |
| eff2042217 |
| a126f86eec |
| 6e2a5aa050 |
| 4ffd10f46d |
| b201149c7e |
| 2dc2b6e9f1 |
| 56710c7df5 |
| 9cd33d2f4b |
| 4588b0d64a |
| afae8442b5 |
| d008330d7d |
| acb2ac5863 |
| 7cadc4c918 |
| 188ad47e73 |
| 43e16ea3bc |
| 9285d5c2ce |
| 7a22a645b5 |
| 624b172e08 |
| c17b128b83 |
| 9b7aa543d9 |
| d9e424bf64 |
| 51b73be63b |
| 9ff620a518 |
| 8248637173 |
| 664c81e8b7 |
| 7fe407a87a |
| d8e63846e2 |
| 6fe1db5631 |
| b05dd4ac06 |
| 2475434080 |
| 627ecd358e |
| ef43a03fc5 |
| f8db967d5a |
| 9763a73af0 |
| f0ba34f581 |
| c45fd0dda0 |
| 625385d684 |
| d239f67c25 |
| 3ed3cb4339 |
| bbb97a35fd |
| 563f6a832b |
| fb50934b8f |
| cf9a2676d0 |
| 6fba9fd20c |
| d621c5562e |
| ad7ac8853c |
| d528406cb8 |
| 6f680241bd |
| 1838ef1ac3 |
| a47fac9af6 |
| db33634b1d |
| 0516dc4d85 |
| d39b7b6d38 |
| 1d2c69fee8 |
| 82e13662c0 |
| 5336e49b39 |
| a17bac171f |
| 6d25599098 |
| 4f8f41c824 |
| 899e60be80 |
| ab97b6e33c |
| ae19a7db8c |
````
@@ -8,6 +8,7 @@ jobs:
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
  dockerhubuploadlatest:
    machine: true
@@ -17,6 +18,7 @@ jobs:
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:latest-py2
      - run: docker push matrixdotorg/synapse:latest-py3
  sytestpy2:
    docker:
````
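As a quick sanity check of what these CI jobs publish, one could pull an image and read back the `gitsha1` label stamped by the `docker build --label` flag above. The tag names come from the config itself; everything else in this sketch is an illustrative assumption, not part of the change.

```bash
# Pull one of the tags pushed by the dockerhubuploadlatest job and
# confirm the gitsha1 label applied at build time.
docker pull matrixdotorg/synapse:latest-py3
docker inspect --format '{{ index .Config.Labels "gitsha1" }}' \
    matrixdotorg/synapse:latest-py3
```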
84 changes: .gitignore (vendored)
````
@@ -1,64 +1,36 @@
*.pyc
.*.swp
# filename patterns
*~
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock

.DS_Store
*.pyc
*.tac
_trial_temp/
_trial_temp*/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info
pip-wheel-metadata/

cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
/homeserver*.yaml
# stuff that is likely to exist when you run a server locally
/*.signing.key
/*.tls.crt
/*.tls.key
/uploads
/media_store/

*.signing.key
*.tls.crt
*.tls.dh
*.tls.key
# IDEs
/.idea/
/.ropeproject/
/.vscode/

.coverage*
coverage.*
!.coveragerc
htmlcov
# build products
/.coverage*
!/.coveragerc
/.tox
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/

demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/media_store.*
demo/etc

uploads
cache

.idea/
media_store/

*.tac

build/
venv/
venv*/
*venv/

localhost-800*/
static/client/register/register_config.js
.tox

env/
*.config

.vscode/
.ropeproject/
*.deb
/debs
````
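One way to verify that the newly anchored patterns behave as intended is `git check-ignore`; the paths below are examples derived from patterns in the diff.

```bash
# -v prints which .gitignore line matched each path.
git check-ignore -v homeserver.yaml .coverage dist/ htmlcov
```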
60 changes: .travis.yml
````
@@ -1,4 +1,4 @@
sudo: false
dist: xenial
language: python

cache:
@@ -12,9 +12,6 @@ cache:
  #
  - $HOME/.cache/pip/wheels

addons:
  postgresql: "9.4"

# don't clone the whole repo history, one commit will do
git:
  depth: 1
@@ -25,6 +22,7 @@ branches:
    - master
    - develop
    - /^release-v/
    - rav/pg95

# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests
@@ -32,46 +30,62 @@ branches:
matrix:
  fast_finish: true
  include:
  - python: 2.7
    env: TOX_ENV=packaging
  - name: "pep8"
    python: 3.6
    env: TOX_ENV="pep8,check_isort,packaging"

  - python: 3.6
    env: TOX_ENV="pep8,check_isort"

  - python: 2.7
  - name: "py2.7 / sqlite"
    python: 2.7
    env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"

  - python: 2.7
  - name: "py2.7 / sqlite / olddeps"
    python: 2.7
    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"

  - python: 2.7
  - name: "py2.7 / postgres9.5"
    python: 2.7
    addons:
      postgresql: "9.5"
    env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
    services:
      - postgresql

  - python: 3.5
  - name: "py3.5 / sqlite"
    python: 3.5
    env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"

  - python: 3.6
    env: TOX_ENV=py36,codecov TRIAL_FLAGS="-j 2"
  - name: "py3.7 / sqlite"
    python: 3.7
    env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"

  - python: 3.6
    env: TOX_ENV=py36-postgres,codecov TRIAL_FLAGS="-j 4"
  - name: "py3.7 / postgres9.4"
    python: 3.7
    addons:
      postgresql: "9.4"
    env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
    services:
      - postgresql

  - name: "py3.7 / postgres9.5"
    python: 3.7
    addons:
      postgresql: "9.5"
    env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
    services:
      - postgresql

  - # we only need to check for the newsfragment if it's a PR build
    if: type = pull_request
    name: "check-newsfragment"
    python: 3.6
    env: TOX_ENV=check-newsfragment
    script:
      - git remote set-branches --add origin develop
      - git fetch origin develop
      - tox -e $TOX_ENV
    script: scripts-dev/check-newsfragment

install:
  # this just logs the postgres version we will be testing against (if any)
  - psql -At -U postgres -c 'select version();' || true

  - pip install tox


# if we don't have python3.6 in this environment, travis unhelpfully gives us
# a `python3.6` on our path which does nothing but spit out a warning. Tox
# tries to run it (even if we're not running a py36 env), so the build logs
````
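To reproduce one of these matrix entries locally, the same `TOX_ENV` and `TRIAL_FLAGS` values can be used with tox. This is a sketch, and assumes the project's tox.ini passes `TRIAL_FLAGS` through to trial.

```bash
pip install tox
# Equivalent of the "py3.7 / sqlite" entry above:
TRIAL_FLAGS="-j 2" tox -e py37
```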
115 changes: CHANGES.md
````
@@ -1,3 +1,118 @@
Synapse 0.99.2 (2019-03-01)
===========================

Features
--------

- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))


Bugfixes
--------

- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
- The dependency checker now correctly reports a version mismatch for optional
  dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
- Fix an issue in the database migration script where the
  `e2e_room_keys.is_verified` column wasn't considered as
  a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))


Internal Changes
----------------

- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))


Synapse 0.99.1.1 (2019-02-14)
=============================

Bugfixes
--------

- Fix "TypeError: '>' not supported" when starting without an existing certificate.
  Fix a bug where an existing certificate would be reprovisoned every day. ([\#4648](https://github.com/matrix-org/synapse/issues/4648))


Synapse 0.99.1 (2019-02-14)
===========================

Features
--------

- Include m.room.encryption on invites by default ([\#3902](https://github.com/matrix-org/synapse/issues/3902))
- Federation OpenID listener resource can now be activated even if federation is disabled ([\#4420](https://github.com/matrix-org/synapse/issues/4420))
- Synapse's ACME support will now correctly reprovision a certificate that approaches its expiry while Synapse is running. ([\#4522](https://github.com/matrix-org/synapse/issues/4522))
- Add ability to update backup versions ([\#4580](https://github.com/matrix-org/synapse/issues/4580))
- Allow the "unavailable" presence status for /sync.
  This change makes Synapse compliant with r0.4.0 of the Client-Server specification. ([\#4592](https://github.com/matrix-org/synapse/issues/4592))
- There is no longer any need to specify `no_tls`: it is inferred from the absence of TLS listeners ([\#4613](https://github.com/matrix-org/synapse/issues/4613), [\#4615](https://github.com/matrix-org/synapse/issues/4615), [\#4617](https://github.com/matrix-org/synapse/issues/4617), [\#4636](https://github.com/matrix-org/synapse/issues/4636))
- The default configuration no longer requires TLS certificates. ([\#4614](https://github.com/matrix-org/synapse/issues/4614))


Bugfixes
--------

- Copy over room federation ability on room upgrade. ([\#4530](https://github.com/matrix-org/synapse/issues/4530))
- Fix noisy "twisted.internet.task.TaskStopped" errors in logs ([\#4546](https://github.com/matrix-org/synapse/issues/4546))
- Synapse is now tolerant of the `tls_fingerprints` option being None or not specified. ([\#4589](https://github.com/matrix-org/synapse/issues/4589))
- Fix 'no unique or exclusion constraint' error ([\#4591](https://github.com/matrix-org/synapse/issues/4591))
- Transfer Server ACLs on room upgrade. ([\#4608](https://github.com/matrix-org/synapse/issues/4608))
- Fix failure to start when not TLS certificate was given even if TLS was disabled. ([\#4618](https://github.com/matrix-org/synapse/issues/4618))
- Fix self-signed cert notice from generate-config. ([\#4625](https://github.com/matrix-org/synapse/issues/4625))
- Fix performance of `user_ips` table deduplication background update ([\#4626](https://github.com/matrix-org/synapse/issues/4626), [\#4627](https://github.com/matrix-org/synapse/issues/4627))


Internal Changes
----------------

- Change the user directory state query to use a filtered call to the db instead of a generic one. ([\#4462](https://github.com/matrix-org/synapse/issues/4462))
- Reject federation transactions if they include more than 50 PDUs or 100 EDUs. ([\#4513](https://github.com/matrix-org/synapse/issues/4513))
- Reduce duplication of ``synapse.app`` code. ([\#4567](https://github.com/matrix-org/synapse/issues/4567))
- Fix docker upload job to push -py2 images. ([\#4576](https://github.com/matrix-org/synapse/issues/4576))
- Add port configuration information to ACME instructions. ([\#4578](https://github.com/matrix-org/synapse/issues/4578))
- Update MSC1711 FAQ to calrify .well-known usage ([\#4584](https://github.com/matrix-org/synapse/issues/4584))
- Clean up default listener configuration ([\#4586](https://github.com/matrix-org/synapse/issues/4586))
- Clarifications for reverse proxy docs ([\#4607](https://github.com/matrix-org/synapse/issues/4607))
- Move ClientTLSOptionsFactory init out of `refresh_certificates` ([\#4611](https://github.com/matrix-org/synapse/issues/4611))
- Fail cleanly if listener config lacks a 'port' ([\#4616](https://github.com/matrix-org/synapse/issues/4616))
- Remove redundant entries from docker config ([\#4619](https://github.com/matrix-org/synapse/issues/4619))
- README updates ([\#4621](https://github.com/matrix-org/synapse/issues/4621))


Synapse 0.99.0 (2019-02-05)
===========================
````
````
@@ -30,7 +30,7 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
@@ -74,16 +74,39 @@ entry. These are managed by Towncrier
To create a changelog entry, make a new file in the ``changelog.d``
file named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
deprecations), or ``misc`` (for internal-only changes).

The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.

Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when recieved over federation. Contributed by Jane
Matrix".
Matrix.".

Debian changelog
----------------

Changes which affect the debian packaging files (in ``debian``) are an
exception.

In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::

    dch

This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name is
corrected for the release.)

If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

Attribution
~~~~~~~~~~~
````
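Putting the changelog rules above together: a hypothetical PR #1234 would get its entry like this (the PR number and wording are made up for illustration), and the newsfragment check from the CI config above can be run locally too.

```bash
# One file per PR, named <PRnumber>.<type>; the entry ends with a full stop.
echo "Fix widget frobnication. Contributed by Jane Matrix." > changelog.d/1234.bugfix
# Local equivalent of the check-newsfragment CI job:
scripts-dev/check-newsfragment
```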
38 changes: INSTALL.md
````
@@ -350,17 +350,33 @@ Once you have installed synapse as above, you will need to configure it.

## TLS certificates

The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
configured without TLS; it should be behind a reverse proxy for TLS/SSL
termination on port 443 which in turn should be used for clients. Port 8448
is configured to use TLS for Federation with a self-signed or verified
certificate, but please be aware that a valid certificate will be required in
Synapse v1.0. Instructions for having Synapse automatically provision and renew federation certificates through ACME can be found at [ACME.md](docs/ACME.md).
The default configuration exposes a single HTTP port: http://localhost:8008. It
is suitable for local testing, but for any practical use, you will either need
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.

If you would like to use your own certificates, you can do so by changing
`tls_certificate_path` and `tls_private_key_path` in `homeserver.yaml`;
alternatively, you can use a reverse-proxy. Apart from port 8448 using TLS,
both ports are the same in the default configuration.
For information on using a reverse proxy, see
[docs/reverse_proxy.rst](docs/reverse_proxy.rst).

To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`, as follows:

* First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:

  ```
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]
  ```
* You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You can either
  point these settings at an existing certificate and key, or you can
  enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
  for having Synapse automatically provision and renew federation
  certificates through ACME can be found at [ACME.md](docs/ACME.md).

## Registering a user

@@ -374,7 +390,7 @@ users. This can be done as follows:

```
$ source ~/synapse/env/bin/activate
$ synctl start # if not already running
$ register_new_matrix_user -c homeserver.yaml https://localhost:8448
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
New user localpart: erikj
Password:
Confirm password:
````
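After editing `homeserver.yaml` as described, a quick way to confirm each listener answers is to hit a cheap client endpoint on both ports. A sketch, assuming the default ports from the excerpt above and that Synapse is managed with synctl:

```bash
synctl restart
curl http://localhost:8008/_matrix/client/versions       # plain HTTP listener
curl -k https://localhost:8448/_matrix/client/versions   # TLS listener (-k tolerates self-signed certs)
```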
150 changes: README.rst
````
@@ -26,7 +26,6 @@ via IRC bridge at irc://irc.freenode.net/matrix.
Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!


About Matrix
============

@@ -88,18 +87,20 @@ Connecting to Synapse from a client
===================================

The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
https://riot.im/app. You will need to specify a "Custom server" when you log on
or register: set this to ``https://domain.tld`` if you setup a reverse proxy
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
server as the default - see `Identity servers`_.)
from a web client.

If using port 8448 you will run into errors if you are using a self-signed
certificate. To overcome this, simply go to ``https://localhost:8448``
directly with your browser and accept the presented certificate. You can then
go back in your web client and proceed further. Valid federation certificates
should not have this problem.
Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.

An easy way to get started is to login or register via Riot at
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
and instead specify a Homeserver URL of ``https://<server_name>:8448``
(or just ``https://<server_name>`` if you are using a reverse proxy).
(Leave the identity server as the default - see `Identity servers`_.)
If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.

If all goes well you should at least be able to log in, create a room, and
start sending messages.
@@ -174,9 +175,32 @@ Separately, Synapse may leak file handles if inbound HTTP requests get stuck
during processing - e.g. blocked behind a lock or talking to a remote server etc.
This is best diagnosed by matching up the 'Received request' and 'Processed request'
log lines and looking for any 'Processed request' lines which take more than
a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
a few seconds to execute. Please let us know at #synapse:matrix.org if
you see this failure mode so we can help debug it, however.

Help!! Synapse eats all my RAM!
-------------------------------

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to either reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained enviroments, or increased if performance starts to
degrade.

Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall amount, and especially in terms of giving back RAM
to the OS. To use it, the library must simply be put in the LD_PRELOAD
environment variable when launching Synapse. On Debian, this can be done
by installing the ``libjemalloc1`` package and adding this line to
``/etc/default/matrix-synapse``::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

This can make a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

Upgrading an existing Synapse
=============================
@@ -196,12 +220,12 @@ Federation is the process by which users on different servers can participate
in the same room. For this to work, those other servers must be able to contact
yours to send messages.

The ``server_name`` in your
``homeserver.yaml`` file determines the way that other servers will reach
yours. By default, they will treat it as a hostname and try to connect to
port 8448. This is easy to set up and will work with the default configuration,
provided you set the ``server_name`` to match your machine's public DNS
hostname.
The ``server_name`` in your ``homeserver.yaml`` file determines the way that
other servers will reach yours. By default, they will treat it as a hostname
and try to connect to port 8448. This is easy to set up and will work with the
default configuration, provided you set the ``server_name`` to match your
machine's public DNS hostname, and give Synapse a TLS certificate which is
valid for your ``server_name``.

For a more flexible configuration, you can set up a DNS SRV record. This allows
you to run your server on a machine that might not have the same name as your
@@ -243,11 +267,8 @@ largest boxes pause for thought.)
Troubleshooting
---------------

You can use the federation tester to check if your homeserver is all set:
``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
If any of the attributes under "checks" is false, federation won't work.
There is also a nicer interface available from a community member at
`<https://neo.lain.haus/fed-tester>`_.
You can use the `federation tester <https://matrix.org/federationtester>`_ to
check if your homeserver is all set.

The typical failure mode with federation is that when you try to join a room,
it is rejected with "401: Unauthorized". Generally this means that other
@@ -263,6 +284,11 @@ So, things to check are:
(it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
it specifies are reachable from outside your network.

Another common problem is that people on other servers can't join rooms that
you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see `<docs/reverse_proxy.rst>`_ for instructions on how to correctly
configure a reverse proxy.

Running a Demo Federation of Synapses
-------------------------------------

@@ -290,7 +316,6 @@ The advantages of Postgres include:
For information on how to install and use PostgreSQL, please see
`docs/postgres.rst <docs/postgres.rst>`_.


.. _reverse-proxy:

Using a reverse proxy with Synapse
@@ -304,54 +329,7 @@ It is recommended to put a reverse proxy such as
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.

The most important thing to know here is that Matrix clients and other Matrix
servers do not necessarily need to connect to your server via the same
port. Indeed, clients will use port 443 by default, whereas servers default to
port 8448. Where these are different, we refer to the 'client port' and the
'federation port'.

All Matrix endpoints begin with ``/_matrix``, so an example nginx
configuration for forwarding client connections to Synapse might look like::

    server {
        listen 443 ssl;
        listen [::]:443 ssl;
        server_name matrix.example.com;

        location /_matrix {
            proxy_pass http://localhost:8008;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
    }

an example Caddy configuration might look like::

    matrix.example.com {
      proxy /_matrix http://localhost:8008 {
        transparent
      }
    }

and an example Apache configuration might look like::

    <VirtualHost *:443>
        SSLEngine on
        ServerName matrix.example.com;

        <Location /_matrix>
            ProxyPass http://127.0.0.1:8008/_matrix nocanon
            ProxyPassReverse http://127.0.0.1:8008/_matrix
        </Location>
    </VirtualHost>

You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.

Having done so, you can then use ``https://matrix.example.com`` (instead of
``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
Synapse from a client`_.

For information on configuring one, see `<docs/reverse_proxy.rst>`_.

Identity Servers
================
@@ -409,7 +387,7 @@ Synapse Development

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Installing from source`_.
`Installing from source <INSTALL.md#installing-from-source>`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

@@ -420,7 +398,7 @@ directory of your choice::
Synapse has a number of external dependencies, that are easiest
to install using pip and a virtualenv::

    virtualenv -p python2.7 env
    virtualenv -p python3 env
    source env/bin/activate
    python -m pip install -e .[all]

@@ -462,25 +440,3 @@ sphinxcontrib-napoleon::
Building internal API documentation::

    python setup.py build_sphinx


Help!! Synapse eats all my RAM!
===============================

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to either reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained enviroments, or increased if performance starts to
degrade.

Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
improvement in overall amount, and especially in terms of giving back RAM
to the OS. To use it, the library must simply be put in the LD_PRELOAD
environment variable when launching Synapse. On Debian, this can be done
by installing the ``libjemalloc1`` package and adding this line to
``/etc/default/matrix-synapse``::

    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
````
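The federation tester mentioned in the Troubleshooting section can also be queried directly at its API endpoint (the URL appears in the removed lines above); `example.com` is a placeholder:

```bash
# Any "checks" attribute that is false means federation will not work.
curl "https://matrix.org/federationtester/api/report?server_name=example.com"
```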
````
@@ -39,7 +39,7 @@ instructions that may be required are listed later in this document.
    ./synctl restart


To check whether your update was sucessful, you can check the Server header
To check whether your update was successful, you can check the Server header
returned by the Client-Server API:

.. code:: bash
@@ -57,7 +57,7 @@ will need to replace any self-signed certificates with those verified by a
root CA. Information on how to do so can be found at `the ACME docs
<docs/ACME.md>`_.

For more information on configuring TLS certificates see the `FAQ <https://github.com/matrix-org/synapse/blob/master/docs/MSC1711_certificates_FAQ.md>`_
For more information on configuring TLS certificates see the `FAQ <docs/MSC1711_certificates_FAQ.md>`_.

Upgrading to v0.34.0
====================
````
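A sketch of the Server-header check described above; replace `<host.name>` with your own server, and add the port if it is not 443:

```bash
# The Server header reports the running Synapse version.
curl -kis "https://<host.name>/_matrix/client/versions" | grep -i "^server:"
```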
23 changes: debian/changelog (vendored)
````
@@ -1,3 +1,26 @@
matrix-synapse-py3 (0.99.2) stable; urgency=medium

  * Fix overwriting of config settings on upgrade.
  * New synapse release 0.99.2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 01 Mar 2019 10:55:08 +0000

matrix-synapse-py3 (0.99.1.1) stable; urgency=medium

  * New synapse release 0.99.1.1

 -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Feb 2019 17:19:44 +0000

matrix-synapse-py3 (0.99.1) stable; urgency=medium

  [ Damjan Georgievski ]
  * Added ExecReload= in service unit file to send a HUP signal

  [ Synapse Packaging team ]
  * New synapse release 0.99.1

 -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Feb 2019 14:12:26 +0000

matrix-synapse-py3 (0.99.0) stable; urgency=medium

  * New synapse release 0.99.0
````
1 change: debian/install (vendored)
````
@@ -1 +1,2 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
````
130 changes: debian/manage_debconf.pl (vendored, executable file)
````
@@ -0,0 +1,130 @@
#!/usr/bin/perl
#
# Interface between our config files and the debconf database.
#
# Usage:
#
#   manage_debconf.pl <action>
#
# where <action> can be:
#
#   read: read the configuration from the yaml into debconf
#   update: update the yaml config according to the debconf database
use strict;
use warnings;

use Debconf::Client::ConfModule (qw/get set/);

# map from the name of a setting in our .yaml file to the relevant debconf
# setting.
my %MAPPINGS=(
    server_name => 'matrix-synapse/server-name',
    report_stats => 'matrix-synapse/report-stats',
);

# enable debug if dpkg --debug
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};

sub read_config {
    my @files = @_;

    foreach my $file (@files) {
        print STDERR "reading $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        # rudimentary parsing which (a) avoids having to depend on a yaml library,
        # and (b) is tolerant of yaml errors
        while($_ = <$FH>) {
            while (my ($setting, $debconf) = each %MAPPINGS) {
                $setting = quotemeta $setting;
                if(/^${setting}\s*:(.*)$/) {
                    my $val = $1;

                    # remove leading/trailing whitespace
                    $val =~ s/^\s*//;
                    $val =~ s/\s*$//;

                    # remove surrounding quotes
                    if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
                        $val = $1;
                    }

                    print STDERR ">> $debconf = $val\n" if $DEBUG;
                    set($debconf, $val);
                }
            }
        }
        close $FH;
    }
}

sub update_config {
    my @files = @_;

    my %substs = ();
    while (my ($setting, $debconf) = each %MAPPINGS) {
        my @res = get($debconf);
        $substs{$setting} = $res[1] if $res[0] == 0;
    }

    foreach my $file (@files) {
        print STDERR "checking $file\n" if $DEBUG;

        open my $FH, "<", $file or next;

        my $updated = 0;

        # read the whole file into memory
        my @lines = <$FH>;

        while (my ($setting, $val) = each %substs) {
            $setting = quotemeta $setting;

            map {
                if (/^${setting}\s*:\s*(.*)\s*$/) {
                    my $current = $1;
                    if ($val ne $current) {
                        $_ = "${setting}: $val\n";
                        $updated = 1;
                    }
                }
            } @lines;
        }
        close $FH;

        next unless $updated;

        print STDERR "updating $file\n" if $DEBUG;
        open $FH, ">", $file or die "unable to update $file";
        print $FH @lines;
        close $FH;
    }
}


my $cmd = $ARGV[0];

my $read = 0;
my $update = 0;

if (not $cmd) {
    die "must specify a command to perform\n";
} elsif ($cmd eq 'read') {
    $read = 1;
} elsif ($cmd eq 'update') {
    $update = 1;
} else {
    die "unknown command '$cmd'\n";
}

my @files = (
    "/etc/matrix-synapse/homeserver.yaml",
    glob("/etc/matrix-synapse/conf.d/*.yaml"),
);

if ($read) {
    read_config(@files);
} elsif ($update) {
    update_config(@files);
}
````
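Per the script's own header comment and the install path added in `debian/install`, the two modes are invoked like this:

```bash
# yaml -> debconf (run from the maintainer scripts before prompting)
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read
# debconf -> yaml (run after the questions have been answered)
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
```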
````
@@ -4,6 +4,9 @@ set -e

. /usr/share/debconf/confmodule

# try to update the debconf db according to whatever is in the config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true

db_input high matrix-synapse/server-name || true
db_input high matrix-synapse/report-stats || true
db_go
````
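With the `read` call added above, re-running the package's questions now picks up whatever is already in the config files before prompting; for example:

```bash
sudo dpkg-reconfigure matrix-synapse-py3
```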
31 changes: debian/matrix-synapse-py3.postinst (vendored)
````
@@ -8,19 +8,36 @@ USER="matrix-synapse"

case "$1" in
  configure|reconfigure)
# Set server name in config file

# generate template config files if they don't exist
mkdir -p "/etc/matrix-synapse/conf.d/"
db_get matrix-synapse/server-name
if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
cat > "$CONFIGFILE_SERVERNAME" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

if [ "$RET" ]; then
echo "server_name: $RET" > $CONFIGFILE_SERVERNAME
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: ''
EOF
fi

db_get matrix-synapse/report-stats
if [ "$RET" ]; then
echo "report_stats: $RET" > $CONFIGFILE_REPORTSTATS
if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
cat > "$CONFIGFILE_REPORTSTATS" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.

# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
fi

# update the config files according to whatever is in the debconf database
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update

if ! getent passwd $USER >/dev/null; then
adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
fi
````
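After configuration, the generated snippets land under the `conf.d` directory created above; the exact file names come from variables defined outside this hunk, so listing the directory is the reliable way to see them:

```bash
ls -l /etc/matrix-synapse/conf.d/
```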
1 change: debian/matrix-synapse.service (vendored)
````
@@ -8,6 +8,7 @@ WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
````
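The `ExecReload=` line added here (the change credited to Damjan Georgievski in the changelog above) is what lets systemd deliver the HUP signal:

```bash
# Sends SIGHUP to the main process, per ExecReload= above.
sudo systemctl reload matrix-synapse
```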
7 changes: demo/.gitignore (vendored, normal file)
````
@@ -0,0 +1,7 @@
*.db
*.log
*.log.*
*.pid

/media_store.*
/etc
````
````
@@ -58,7 +58,11 @@ RUN apt-get update -qq -o Acquire::Languages=none \
        sqlite3

COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
RUN apt-get install -yq /dh-virtualenv_1.1-1_all.deb

# install dhvirtualenv. Update the apt cache again first, in case we got a
# cached cache from docker the first time.
RUN apt-get update -qq -o Acquire::Languages=none \
    && apt-get install -yq /dh-virtualenv_1.1-1_all.deb

WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]
````
````
@@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICnTCCAYUCAgPoMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNVBAMMCWxvY2FsaG9z
dDAeFw0xOTAxMTUwMDQxNTBaFw0yOTAxMTIwMDQxNTBaMBQxEjAQBgNVBAMMCWxv
Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKqm81/8j5d
R1s7VZ8ueg12gJrPVCCAOkp0UnuC/ZlXhN0HTvnhQ+B0IlSgB4CcQZyf4jnA6o4M
rwSc7VX0MPE9x/idoA0g/0WoC6tsxugOrvbzCw8Tv+fnXglm6uVc7aFPfx69wU3q
lUHGD/8jtEoHxmCG177Pt2lHAfiVLBAyMQGtETzxt/yAfkloaybe316qoljgK5WK
cokdAt9G84EEqxNeEnx5FG3Vc100bAqJS4GvQlFgtF9KFEqZKEyB1yKBpPMDfPIS
V9hIV0gswSmYI8dpyBlGf5lPElY68ZGABmOQgr0RI5qHK/h28OpFPE0q3v4AMHgZ
I36wii4NrAUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAfD8kcpZ+dn08xh1qtKtp
X+/YNZaOBIeVdlCzfoZKNblSFAFD/jCfObNJYvZMUQ8NX2UtEJp1lTA6m7ltSsdY
gpC2k1VD8iN+ooXklJmL0kxc7UUqho8I0l9vn35h+lhLF0ihT6XfZVi/lDHWl+4G
rG+v9oxvCSCWrNWLearSlFPtQQ8xPtOE0nLwfXtOI/H/2kOuC38ihaIWM4jjbWXK
E/ksgUfuDv0mFiwf1YdBF5/M3/qOowqzU8HgMJ3WoT/9Po5Ya1pWc+3BcxxytUDf
XdMu0tWHKX84tZxLcR1nZHzluyvFFM8xNtLi9xV0Z7WbfT76V0C/ulEOybGInYsv
nQ==
-----END CERTIFICATE-----
````
````
@@ -2,13 +2,7 @@

## TLS ##

{% if SYNAPSE_NO_TLS %}
no_tls: True

# workaround for https://github.com/matrix-org/synapse/issues/4554
tls_certificate_path: "/conf/dummy.tls.crt"

{% else %}
{% if not SYNAPSE_NO_TLS %}

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
````
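For context, the template variables in this file are supplied as environment variables when running the image. A minimal sketch, assuming the `/data` volume convention; the image name matches the CircleCI jobs above, and all values are illustrative:

```bash
docker run -d \
    -e SYNAPSE_SERVER_NAME=example.com \
    -e SYNAPSE_NO_TLS=1 \
    -v /srv/synapse-data:/data \
    matrixdotorg/synapse:latest
```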
43 changes: docs/ACME.md
````
@@ -10,13 +10,14 @@ through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.

In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
used between client and federation ports without issue.
used between client and federation ports without issue.

For a sample configuration, please inspect the new ACME section in the example
generated config by running the `generate-config` executable. For example:
If your configuration file does not already have an `acme` section, you can
generate an example config by running the `generate_config` executable. For
example:

```
~/synapse/env3/bin/generate-config
~/synapse/env3/bin/generate_config
```

You will need to provide Let's Encrypt (or another ACME provider) access to
@@ -27,10 +28,9 @@ like `authbind` to allow Synapse to listen on port 80 without root access.
(Do not run Synapse with root permissions!) Detailed instructions are
available under "ACME setup" below.

If you are already using self-signed certificates, you will need to back up
or delete them (files `example.com.tls.crt` and `example.com.tls.key` in
Synapse's root directory), Synapse's ACME implementation will not overwrite
them.
If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), Synapse's ACME implementation will not overwrite them.

You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
@@ -41,10 +41,10 @@ placed in Synapse's config directory without the need for any ACME setup.

The main steps for enabling ACME support in short summary are:

1. Allow Synapse to listen on port 80 with authbind, or forward it from a reverse-proxy.
1. Set `acme:enabled` to `true` in homeserver.yaml.
1. Allow Synapse to listen for incoming ACME challenges.
1. Enable ACME support in `homeserver.yaml`.
1. Move your old certificates (files `example.com.tls.crt` and `example.com.tls.key` out of the way if they currently exist at the paths specified in `homeserver.yaml`.
1. Restart Synapse
1. Restart Synapse.

Detailed instructions for each step are provided below.

@@ -71,7 +71,7 @@ location /.well-known/acme-challenge {
}
```

For Apache, add the following to your existing webserver config::
For Apache, add the following to your existing webserver config:

```
ProxyPass /.well-known/acme-challenge http://localhost:8009/.well-known/acme-challenge
@@ -79,6 +79,13 @@ ProxyPass /.well-known/acme-challenge http://localhost:8009/.well-known/acme-cha

Make sure to restart/reload your webserver after making changes.

Now make the relevant changes in `homeserver.yaml` to enable ACME support:

```
acme:
    enabled: true
    port: 8009
```

#### Authbind

@@ -102,25 +109,21 @@ sudo touch /etc/authbind/byport/80
sudo chmod 777 /etc/authbind/byport/80
```

When Synapse is started, use the following syntax::
When Synapse is started, use the following syntax:

```
authbind --deep <synapse start command>
```

### Config file editing

Once Synapse is able to listen on port 80 for ACME challenge
requests, it must be told to perform ACME provisioning by setting `enabled`
to true under the `acme` section in `homeserver.yaml`:
Make the relevant changes in `homeserver.yaml` to enable ACME support:

```
acme:
    enabled: true
```

### Starting synapse
### (Re)starting synapse

Ensure that the certificate paths specified in `homeserver.yaml` (`tls_certificate_path` and `tls_private_key_path`) do not currently point to any files. Synapse will not provision certificates if files exist, as it does not want to overwrite existing certificates.

Finally, start/restart Synapse.
Finally, start/restart Synapse.
````
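Once the port-80 forwarding (or authbind) setup above is in place, reachability of the challenge path can be checked from outside the network; any HTTP response at all (a 404 is fine) shows requests are arriving at the ACME listener. `example.com` is a placeholder:

```bash
curl -i http://example.com/.well-known/acme-challenge/test
```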
@@ -1,4 +1,4 @@
# MSC 1711 Certificates FAQ
# MSC1711 Certificates FAQ

The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
supports the r0.1 release of the server to server specification, but is
@@ -42,9 +42,9 @@ imminent Matrix 1.0 release, you can also see our
* It used to work just fine, why are you breaking everything?
* Can I manage my own certificates rather than having Synapse renew
  certificates itself?
* Do you still recommend against using a reverse-proxy on the federation port?
* Do you still recommend against using a reverse proxy on the federation port?
* Do I still need to give my TLS certificates to Synapse if I am using a
  reverse-proxy?
  reverse proxy?
* Do I need the same certificate for the client and federation port?
* How do I tell Synapse to reload my keys/certificates after I replace them?

@@ -107,10 +107,10 @@ hosted at a target domain of `customer.example.net`. Currently you should have
an SRV record which looks like:

```
_matrix._tcp.example.com. IN SRV 10 5 443 customer.example.net.
_matrix._tcp.example.com. IN SRV 10 5 8000 customer.example.net.
```

In this situation, you have two choices for how to proceed:
In this situation, you have three choices for how to proceed:

#### Option 1: give Synapse a certificate for your matrix domain

@@ -123,12 +123,19 @@ doing one of the following:
  and `tls_private_key_path`, or:

* Use Synapse's [ACME support](./ACME.md), and forward port 80 on the
  `server_name` domain to your Synapse instance, or:
  `server_name` domain to your Synapse instance.

* Set up a reverse-proxy on port 8448 on the `server_name` domain, which
  forwards to Synapse. Once it is set up, you can remove the SRV record.
#### Option 2: run Synapse behind a reverse proxy

#### Option 2: add a .well-known file to delegate your matrix traffic
If you have an existing reverse proxy set up with correct TLS certificates for
your domain, you can simply route all traffic through the reverse proxy by
updating the SRV record appropriately (or removing it, if the proxy listens on
8448).

See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
reverse proxy.

#### Option 3: add a .well-known file to delegate your matrix traffic

This will allow you to keep Synapse on a separate domain, without having to
give it a certificate for the matrix domain.
@@ -151,25 +158,32 @@ You can do this with a `.well-known` file as follows:
   `https://<server_name>/.well-known/matrix/server` with contents:

```json
{"m.server": "<target domain>:<port>"}
{"m.server": "<target server name>"}
```

In the above example, `https://example.com/.well-known/matrix/server`
should have the contents:
where the target server name is resolved as usual (i.e. SRV lookup, falling
back to talking to port 8448).

In the above example, where synapse is listening on port 8000,
`https://example.com/.well-known/matrix/server` should have `m.server` set to one of:

1. `customer.example.net` ─ with a SRV record on
   `_matrix._tcp.customer.example.com` pointing to port 8000, or:

2. `customer.example.net` ─ updating synapse to listen on the default port
   8448, or:

3. `customer.example.net:8000` ─ ensuring that if there is a reverse proxy
   on `customer.example.net:8000` it correctly handles HTTP requests with
   Host header set to `customer.example.net:8000`.

```json
{"m.server": "customer.example.net:443"}
```
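To make the lookup order concrete, here is a rough Python sketch of what a federating server does with `m.server` (simplified; real implementations add caching, certificate checks, and the SRV fallback described above; `requests` is an assumed dependency):

```python
# A sketch of .well-known delegation lookup, not the actual Synapse code.
import requests

def lookup_m_server(server_name):
    url = "https://%s/.well-known/matrix/server" % (server_name,)
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
        return resp.json().get("m.server", server_name)
    except (requests.RequestException, ValueError):
        # No usable .well-known: fall back to an SRV lookup, then port 8448.
        return server_name

print(lookup_m_server("example.com"))  # e.g. "customer.example.net:443"
```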
## FAQ

### Synapse 0.99.0 has just been released, what do I need to do right now?

Upgrade as soon as you can in preparation for Synapse 1.0.0.

### How do I upgrade?

Follow the upgrade notes here [UPGRADE.rst](https://github.com/matrix-org/synapse/blob/master/UPGRADE.rst)
Upgrade as soon as you can in preparation for Synapse 1.0.0, and update your
TLS certificates as [above](#configuring-certificates-for-compatibility-with-synapse-100).

### What will happen if I do not set up a valid federation certificate immediately?

@@ -186,39 +200,24 @@ homeserver will not be able to federate with any Synapse >= 1.0.0

### When do I need a SRV record or .well-known URI?

If your homeserver listens on the default federation port (8448), and your
server_name points to the host that your homeserver runs on, you do not need an
SRV record or .well-known/matrix/server URI.\
For instance, if you registered example.com and pointed its DNS A record at a
`server_name` points to the host that your homeserver runs on, you do not need an
SRV record or `.well-known/matrix/server` URI.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh Upcloud VPS or similar, you could install Synapse 0.99 on that host,
giving it a server_name of example.com, and it would automatically generate a
giving it a server_name of `example.com`, and it would automatically generate a
valid TLS certificate for you via Let's Encrypt and no SRV record or
.well-known URI would be needed.
`.well-known` URI would be needed.

This is the common case, although you can add an SRV record or
.well-known/matrix/server URI for completeness if you wish.
`.well-known/matrix/server` URI for completeness if you wish.

**However**, if your server does not listen on port 8448, or if your server_name
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it.

The easiest way to do this is with a .well-known/matrix/server URI on the
webroot of the domain to advertise your server. For instance, if you ran
"matrixhosting.com" and you were hosting a Matrix server for `example.com`, you
would ask `example.com` to create a file at
`https://example.com/.well-known/matrix/server` with contents:

```json
{"m.server": "example.matrixhosting.com:8448"}
```

...which would tell servers trying to connect to example.com to instead connect
to example.matrixhosting.com on port 8448. You would then configure Synapse
with a server_name of "example.com", but generate a TLS certificate for
example.matrixhosting.com.

As an alternative, you can still use an SRV DNS record for the delegation, but
this will require you to have a certificate for the matrix domain (example.com
in this example). See "Can I still use an SRV record?".
In this case, you should see ["If you do have an SRV record
currently"](#if-you-do-have-an-srv-record-currently) above.

### Can I still use an SRV record?

@@ -244,13 +243,13 @@ also need to use a .well-known URI instead. However, see also "I have created a

### I have created a .well-known URI. Do I still need an SRV record?

As of Synapse 0.99, Synapse will first check for the existence of a .well-known
URL and follow any delegation it suggests. It will only then check for the
As of Synapse 0.99, Synapse will first check for the existence of a `.well-known`
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.

That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand .well-known URIs, so if you removed your SRV record you
which do not understand `.well-known` URIs, so if you removed your SRV record you
would no longer be able to federate with them.

It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
@@ -301,17 +300,20 @@ attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation end points.

### Do you still recommend against using a reverse-proxy on the federation port?
### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse-proxy and manage their
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
reverse proxy.

### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

Practically speaking, this is no longer necessary.

If you are using a reverse-proxy for all of your TLS traffic, then you can set
If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True`. In that case, the only reason Synapse needs the certificate is
to populate a legacy 'tls_fingerprints' field in the federation API. This is
ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
@@ -325,12 +327,12 @@ this, you can give it any TLS certificate at all. This will be fixed soon.

### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you doing so, particularly if you are using a
reverse-proxy. However, Synapse will use the same certificate on any ports
where TLS is configured.
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.

### How do I tell Synapse to reload my keys/certificates after I replace them?

Synapse will reload the keys and certificates when it receives a SIGHUP - for
example kill -HUP $(cat homeserver.pid). Alternatively, simply restart Synapse,
though this will result in downtime while it restarts.
example `kill -HUP $(cat homeserver.pid)`. Alternatively, simply restart
Synapse, though this will result in downtime while it restarts.
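The same reload can be triggered from Python if that is more convenient; a sketch, assuming the default `homeserver.pid` location:

```python
# Send SIGHUP to a running Synapse, equivalent to
# `kill -HUP $(cat homeserver.pid)`.
import os
import signal

with open("homeserver.pid") as f:
    pid = int(f.read().strip())

os.kill(pid, signal.SIGHUP)  # Synapse reloads keys/certificates on SIGHUP
```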
112
docs/reverse_proxy.rst
Normal file
@@ -0,0 +1,112 @@
Using a reverse proxy with Synapse
==================================

It is recommended to put a reverse proxy such as
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
`Caddy <https://caddyserver.com/docs/proxy>`_ or
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.

**NOTE**: Your reverse proxy must not 'canonicalise' or 'normalise' the
requested URI in any way (for example, by decoding ``%xx`` escapes). Beware
that Apache *will* canonicalise URIs unless you specify ``nocanon``.

When setting up a reverse proxy, remember that Matrix clients and other Matrix
servers do not necessarily need to connect to your server via the same server
name or port. Indeed, clients will use port 443 by default, whereas servers
default to port 8448. Where these are different, we refer to the 'client port'
and the 'federation port'. See `Setting up federation
<../README.rst#setting-up-federation>`_ for more details of the algorithm used for
federation connections.

Let's assume that we expect clients to connect to our server at
``https://matrix.example.com``, and other servers to connect at
``https://example.com:8448``. Here are some example configurations:

* nginx::

      server {
          listen 443 ssl;
          listen [::]:443 ssl;
          server_name matrix.example.com;

          location /_matrix {
              proxy_pass http://localhost:8008;
              proxy_set_header X-Forwarded-For $remote_addr;
          }
      }

      server {
          listen 8448 ssl default_server;
          listen [::]:8448 ssl default_server;
          server_name example.com;

          location / {
              proxy_pass http://localhost:8008;
              proxy_set_header X-Forwarded-For $remote_addr;
          }
      }

* Caddy::

      matrix.example.com {
          proxy /_matrix http://localhost:8008 {
              transparent
          }
      }

      example.com:8448 {
          proxy / http://localhost:8008 {
              transparent
          }
      }

* Apache (note the ``nocanon`` options here!)::

      <VirtualHost *:443>
          SSLEngine on
          ServerName matrix.example.com;

          <Location /_matrix>
              ProxyPass http://127.0.0.1:8008/_matrix nocanon
              ProxyPassReverse http://127.0.0.1:8008/_matrix
          </Location>
      </VirtualHost>

      <VirtualHost *:8448>
          SSLEngine on
          ServerName example.com;

          <Location /_matrix>
              ProxyPass http://127.0.0.1:8008/_matrix nocanon
              ProxyPassReverse http://127.0.0.1:8008/_matrix
          </Location>
      </VirtualHost>

* HAProxy::

      frontend https
          bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
          bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1

          # Matrix client traffic
          acl matrix hdr(host) -i matrix.example.com
          use_backend matrix if matrix

      frontend matrix-federation
          bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
          bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
          default_backend matrix

      backend matrix
          server matrix 127.0.0.1:8008

You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.

Having done so, you can then use ``https://matrix.example.com`` (instead of
``https://matrix.example.com:8448``) as the "Custom server" when connecting to
Synapse from a client.
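Once the proxy is up, it can be worth confirming which certificate the
federation port actually serves. A short Python sketch (adjust the host and
port to your deployment; this is illustrative, not part of Synapse)::

    # Print the subject and expiry of the certificate served on the
    # federation port.
    import socket
    import ssl

    ctx = ssl.create_default_context()
    with socket.create_connection(("example.com", 8448), timeout=10) as sock:
        with ctx.wrap_socket(sock, server_hostname="example.com") as tls:
            cert = tls.getpeercert()
            print(cert["subject"], cert["notAfter"])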
@@ -137,7 +137,6 @@ for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

@@ -221,3 +220,28 @@ SYNC (S, C)

See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.


Cache Invalidation Stream
~~~~~~~~~~~~~~~~~~~~~~~~~

The cache invalidation stream is used to inform workers when they need to
invalidate any of their caches in the data store. This is done by streaming all
cache invalidations done on master down to the workers, assuming that any caches
on the workers also exist on the master.

Each individual cache invalidation results in a row being sent down replication,
which includes the cache name (the name of the function) and the key to
invalidate. For example::

    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

However, there are times when a number of caches need to be invalidated at the
same time with the same key. To reduce traffic we batch those invalidations into
a single poke by defining a special cache name that workers understand to mean
to expand to invalidate the correct caches.

Currently the special cache names are declared in ``synapse/storage/_base.py``
and are:

1. ``cs_cache_fake`` ─ invalidates caches that depend on the current state
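For illustration, a row like the one above can be unpacked as follows (a
rough sketch, not the actual Synapse parser)::

    import json

    line = '> RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]'
    _, stream_name, token, row = line.lstrip("> ").split(" ", 3)
    cache_name, keys, invalidation_ts = json.loads(row)
    # stream_name == "caches"; cache_name names the cached function and
    # keys is the argument tuple whose entry should be dropped.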
@@ -26,9 +26,8 @@ Configuration

To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. Note that this includes
requests made to the federation port. The caveats regarding running a
reverse-proxy on the federation port still apply (see
https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
requests made to the federation port. See `<reverse_proxy.rst>`_ for
information on setting up a reverse proxy.

To enable workers, you need to add two replication listeners to the master
synapse, e.g.::

@@ -223,6 +222,13 @@ following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
    ^/_matrix/client/(api/v1|r0|unstable)/login$

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance::

    ^/_matrix/client/(r0|unstable)/register$
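When writing the proxy rules, it can help to test a request path against
these patterns directly; a small sketch using two of the expressions above::

    import re

    worker_patterns = [
        r"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$",
        r"^/_matrix/client/(api/v1|r0|unstable)/login$",
    ]

    def routable_to_worker(path):
        return any(re.match(p, path) for p in worker_patterns)

    print(routable_to_worker("/_matrix/client/r0/login"))  # True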

``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~
41
scripts-dev/check-newsfragment
Executable file
@@ -0,0 +1,41 @@
#!/bin/bash
#
# A script which checks that an appropriate news file has been added on this
# branch.

set -e

# make sure that origin/develop is up to date
git remote set-branches --add origin develop
git fetch --depth=1 origin develop

UPSTREAM=origin/develop

# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet $UPSTREAM... -- debian; then
    if git diff --quiet $UPSTREAM... -- debian/changelog; then
        echo "Updates to debian directory, but no update to the changelog." >&2
        exit 1
    fi
fi

# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if git diff --name-only $UPSTREAM... | grep -qv '^develop/'; then
    tox -e check-newsfragment
fi

echo
echo "--------------------------"
echo

# check that any new newsfiles on this branch end with a full stop.
for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
    lastchar=`tr -d '\n' < $f | tail -c 1`
    if [ $lastchar != '.' ]; then
        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
        exit 1
    fi
done
@@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
    "group_summary_users": ["is_public"],
    "group_roles": ["is_public"],
    "local_group_membership": ["is_publicised", "is_admin"],
    "e2e_room_keys": ["is_verified"],
}
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2018-9 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.99.0"
__version__ = "0.99.2"
@@ -73,6 +73,7 @@ class EventTypes(object):
    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    RoomEncryption = "m.room.encryption"
    GuestAccess = "m.room.guest_access"

    # These are used for validation
@@ -15,19 +15,38 @@

import gc
import logging
import signal
import sys
import traceback

import psutil
from daemonize import Daemonize

from twisted.internet import error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

logger = logging.getLogger(__name__)

_sighup_callbacks = []


def register_sighup(func):
    """
    Register a function to be called when a SIGHUP occurs.

    Args:
        func (function): Function to be called when sent a SIGHUP signal.
            Will be called with a single argument, the homeserver.
    """
    _sighup_callbacks.append(func)
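# Example usage (a hypothetical callback, shown for illustration only):
#
#     def _on_sighup(hs):
#         logger.info("reloading state for %s", hs.config.server_name)
#
#     register_sighup(_on_sighup)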

def start_worker_reactor(appname, config):
    """ Run the reactor in the main process

@@ -136,9 +155,8 @@ def listen_metrics(bind_addresses, port):
    from prometheus_client import start_http_server

    for host in bind_addresses:
        reactor.callInThread(start_http_server, int(port),
                             addr=host, registry=RegistryProxy)
        logger.info("Metrics now reporting on %s:%d", host, port)
        logger.info("Starting metrics listener on %s:%d", host, port)
        start_http_server(port, addr=host, registry=RegistryProxy)


def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
@@ -146,21 +164,23 @@ def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    Create a TCP socket for a port and several addresses

    Returns:
        list (empty)
        list[twisted.internet.tcp.Port]: listening for TCP connections
    """
    r = []
    for address in bind_addresses:
        try:
            reactor.listenTCP(
                port,
                factory,
                backlog,
                address
            r.append(
                reactor.listenTCP(
                    port,
                    factory,
                    backlog,
                    address
                )
            )
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    logger.info("Synapse now listening on TCP port %d", port)
    return []
    return r
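# (Returning the Port objects, rather than an empty list, lets callers stop
# the listeners at shutdown or swap in a new TLS context factory when
# handling a SIGHUP.)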
def listen_ssl(
@@ -187,5 +207,102 @@
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    logger.info("Synapse now listening on port %d (TLS)", port)
    return r


def refresh_certificate(hs):
    """
    Refresh the TLS certificates that Synapse is using by re-reading them from
    disk and updating the TLS context factories to use them.
    """

    if not hs.config.has_tls_listener():
        # attempt to reload the certs for the good of the tls_fingerprints
        hs.config.read_certificate_from_disk(require_cert_and_key=False)
        return

    hs.config.read_certificate_from_disk(require_cert_and_key=True)
    hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)

    if hs._listening_services:
        logger.info("Updating context factories...")
        for i in hs._listening_services:
            # When you listenSSL, it doesn't make an SSL port but a TCP one with
            # a TLS wrapping factory around the factory you actually want to get
            # requests. This factory attribute is public but missing from
            # Twisted's documentation.
            if isinstance(i.factory, TLSMemoryBIOFactory):
                addr = i.getHost()
                logger.info(
                    "Replacing TLS context factory on [%s]:%i", addr.host, addr.port,
                )
                # We want to replace TLS factories with a new one, with the new
                # TLS configuration. We do this by reaching in and pulling out
                # the wrappedFactory, and then re-wrapping it.
                i.factory = TLSMemoryBIOFactory(
                    hs.tls_server_context_factory,
                    False,
                    i.factory.wrappedFactory
                )
        logger.info("Context factories updated.")


def start(hs, listeners=None):
    """
    Start a Synapse server or worker.

    Args:
        hs (synapse.server.HomeServer)
        listeners (list[dict]): Listener configuration ('listeners' in homeserver.yaml)
    """
    try:
        # Set up the SIGHUP machinery.
        if hasattr(signal, "SIGHUP"):
            def handle_sighup(*args, **kwargs):
                for i in _sighup_callbacks:
                    i(hs)

            signal.signal(signal.SIGHUP, handle_sighup)

        register_sighup(refresh_certificate)

        # Load the certificate from disk.
        refresh_certificate(hs)

        # It is now safe to start your Synapse.
        hs.start_listening(listeners)
        hs.get_datastore().start_profiling()

        setup_sentry(hs)
    except Exception:
        traceback.print_exc(file=sys.stderr)
        reactor = hs.get_reactor()
        if reactor.running:
            reactor.stop()
        sys.exit(1)


def setup_sentry(hs):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
    """

    if not hs.config.sentry_enabled:
        return

    import sentry_sdk
    sentry_sdk.init(
        dsn=hs.config.sentry_dsn,
        release=get_version_string(synapse),
    )

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server_name)

        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
        name = hs.config.worker_name if hs.config.worker_name else "master"
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)
@@ -168,12 +168,7 @@ def start(config_options):
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ps, config.worker_listeners)

    _base.start_worker_reactor("synapse-appservice", config)
@@ -25,7 +25,6 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
@@ -41,6 +40,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.login import LoginRestServlet
from synapse.rest.client.v1.room import (
    JoinedRoomMemberListRestServlet,
    PublicRoomListRestServlet,
@@ -48,6 +48,7 @@ from synapse.rest.client.v1.room import (
    RoomMemberListRestServlet,
    RoomStateRestServlet,
)
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -93,6 +94,8 @@ class ClientReaderServer(HomeServer):
            JoinedRoomMemberListRestServlet(self).register(resource)
            RoomStateRestServlet(self).register(resource)
            RoomEventContextServlet(self).register(resource)
            RegisterRestServlet(self).register(resource)
            LoginRestServlet(self).register(resource)

            resources.update({
                "/_matrix/client/r0": resource,
@@ -173,17 +176,7 @@ def start(config_options):
    )

    ss.setup()

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-client-reader", config)
@@ -25,7 +25,6 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
@@ -194,17 +193,7 @@ def start(config_options):
    )

    ss.setup()

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-event-creator", config)
@@ -26,7 +26,6 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
@@ -41,6 +40,7 @@ from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
@@ -63,6 +63,7 @@ class FederationReaderSlavedStore(
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedKeyStore,
    SlavedRegistrationStore,
    RoomStore,
    DirectoryStore,
    SlavedTransactionStore,
@@ -87,6 +88,16 @@ class FederationReaderServer(HomeServer):
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })
                if name == "openid" and "federation" not in res["names"]:
                    # Only load the openid resource separately if federation resource
                    # is not specified since federation resource includes openid
                    # resource.
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(
                            self,
                            servlet_groups=["openid"],
                        ),
                    })

        root_resource = create_resource_tree(resources, NoResource())

@@ -99,7 +110,8 @@ class FederationReaderServer(HomeServer):
                    listener_config,
                    root_resource,
                    self.version_string,
                )
                ),
                reactor=self.get_reactor()
            )

        logger.info("Synapse federation reader now listening on port %d", port)
@@ -160,17 +172,7 @@ def start(config_options):
    )

    ss.setup()

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-federation-reader", config)
@@ -25,7 +25,6 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
@@ -192,17 +191,8 @@ def start(config_options):
    )

    ss.setup()
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    _base.start_worker_reactor("synapse-federation-sender", config)
@@ -21,12 +21,11 @@ from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
@@ -67,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
        headers = {
            "Authorization": auth_headers,
        }
        result = yield self.http_client.get_json(
            self.main_uri + request.uri.decode('ascii'),
            headers=headers,
        )

        try:
            result = yield self.http_client.get_json(
                self.main_uri + request.uri.decode('ascii'),
                headers=headers,
            )
        except HttpResponseException as e:
            raise e.to_synapse_error()

        defer.returnValue((200, result))

    @defer.inlineCallbacks
@@ -250,17 +254,7 @@ def start(config_options):
    )

    ss.setup()

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-frontend-proxy", config)
@@ -1,6 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,12 +15,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import gc
import logging
import os
import signal
import sys
import traceback

from six import iteritems

@@ -28,7 +29,7 @@ from prometheus_client import Gauge

from twisted.application import service
from twisted.internet import defer, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.failure import Failure
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
@@ -49,7 +50,6 @@ from synapse.app import _base
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
@@ -86,7 +86,6 @@ def gz_wrap(r):

class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore
    _listening_services = []

    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
@@ -94,14 +93,13 @@ class SynapseHomeServer(HomeServer):
        tls = listener_config.get("tls", False)
        site_tag = listener_config.get("tag", port)

        if tls and config.no_tls:
            raise ConfigError(
                "Listener on port %i has TLS enabled, but no_tls is set" % (port,),
            )

        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "openid" and "federation" in res["names"]:
                    # Skip loading openid resource if federation is defined
                    # since federation resource will include openid
                    continue
                resources.update(self._configure_named_resource(
                    name, res.get("compress", False),
                ))
@@ -126,7 +124,7 @@ class SynapseHomeServer(HomeServer):
        root_resource = create_resource_tree(resources, root_resource)

        if tls:
            return listen_ssl(
            ports = listen_ssl(
                bind_addresses,
                port,
                SynapseSite(
@@ -137,10 +135,12 @@ class SynapseHomeServer(HomeServer):
                    self.version_string,
                ),
                self.tls_server_context_factory,
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d (TLS)", port)

        else:
            return listen_tcp(
            ports = listen_tcp(
                bind_addresses,
                port,
                SynapseSite(
@@ -149,8 +149,12 @@ class SynapseHomeServer(HomeServer):
                    listener_config,
                    root_resource,
                    self.version_string,
                )
                ),
                reactor=self.get_reactor(),
            )
            logger.info("Synapse now listening on TCP port %d", port)

        return ports

    def _configure_named_resource(self, name, compress=False):
        """Build a resource map for a named resource

@@ -196,6 +200,11 @@ class SynapseHomeServer(HomeServer):
                FEDERATION_PREFIX: TransportLayerServer(self),
            })

        if name == "openid":
            resources.update({
                FEDERATION_PREFIX: TransportLayerServer(self, servlet_groups=["openid"]),
            })

        if name in ["static", "client"]:
            resources.update({
                STATIC_PREFIX: File(
@@ -241,10 +250,10 @@ class SynapseHomeServer(HomeServer):

        return resources

    def start_listening(self):
    def start_listening(self, listeners):
        config = self.get_config()

        for listener in config.listeners:
        for listener in listeners:
            if listener["type"] == "http":
                self._listening_services.extend(
                    self._listener_http(config, listener)
@@ -260,14 +269,14 @@ class SynapseHomeServer(HomeServer):
                    )
                )
            elif listener["type"] == "replication":
                bind_addresses = listener["bind_addresses"]
                for address in bind_addresses:
                    factory = ReplicationStreamProtocolFactory(self)
                    server_listener = reactor.listenTCP(
                        listener["port"], factory, interface=address
                    )
                services = listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    ReplicationStreamProtocolFactory(self),
                )
                for s in services:
                    reactor.addSystemEventTrigger(
                        "before", "shutdown", server_listener.stopListening,
                        "before", "shutdown", s.stopListening,
                    )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
@@ -328,20 +337,11 @@ def setup(config_options):
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    sighup_callbacks = []
    synapse.config.logger.setup_logging(
        config,
        use_worker_options=False,
        register_sighup=sighup_callbacks.append
        use_worker_options=False
    )

    def handle_sighup(*args, **kwargs):
        for i in sighup_callbacks:
            i(*args, **kwargs)

    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, handle_sighup)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

@@ -377,76 +377,77 @@ def setup(config_options):

    hs.setup()

    def refresh_certificate(*args):
    @defer.inlineCallbacks
    def do_acme():
        """
        Refresh the TLS certificates that Synapse is using by re-reading them
        from disk and updating the TLS context factories to use them.
        Reprovision an ACME certificate, if it's required.

        Returns:
            Deferred[bool]: Whether the cert has been updated.
        """
        logging.info("Reloading certificate from disk...")
        hs.config.read_certificate_from_disk()
        hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
        hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        acme = hs.get_acme_handler()

        # Check how long the certificate is active for.
        cert_days_remaining = hs.config.is_disk_cert_valid(
            allow_self_signed=False
        )
        logging.info("Certificate reloaded.")

        logging.info("Updating context factories...")
        for i in hs._listening_services:
            if isinstance(i.factory, TLSMemoryBIOFactory):
                i.factory = TLSMemoryBIOFactory(
                    hs.tls_server_context_factory,
                    False,
                    i.factory.wrappedFactory
                )
        logging.info("Context factories updated.")
        # We want to reprovision if cert_days_remaining is None (meaning no
        # certificate exists), or the days remaining number it returns
        # is less than our re-registration threshold.
        provision = False

    sighup_callbacks.append(refresh_certificate)
        if (
            cert_days_remaining is None or
            cert_days_remaining < hs.config.acme_reprovision_threshold
        ):
            provision = True

        if provision:
            yield acme.provision_certificate()

        defer.returnValue(provision)

    @defer.inlineCallbacks
    def reprovision_acme():
        """
        Provision a certificate from ACME, if required, and reload the TLS
        certificate if it's renewed.
        """
        reprovisioned = yield do_acme()
        if reprovisioned:
            _base.refresh_certificate(hs)

    @defer.inlineCallbacks
    def start():
        try:
            # Check if the certificate is still valid.
            cert_days_remaining = hs.config.is_disk_cert_valid()

            # Run the ACME provisioning code, if it's enabled.
            if hs.config.acme_enabled:
                # If ACME is enabled, we might need to provision a certificate
                # before starting.
                acme = hs.get_acme_handler()

                # Start up the webservices which we will respond to ACME
                # challenges with.
                # challenges with, and then provision.
                yield acme.start_listening()
                yield do_acme()

                # We want to reprovision if cert_days_remaining is None (meaning no
                # certificate exists), or the days remaining number it returns
                # is less than our re-registration threshold.
                if (cert_days_remaining is None) or (
                    not cert_days_remaining > hs.config.acme_reprovision_threshold
                ):
                    yield acme.provision_certificate()
                # Check if it needs to be reprovisioned every day.
                hs.get_clock().looping_call(
                    reprovision_acme,
                    24 * 60 * 60 * 1000
                )

            # Read the certificate from disk and build the context factories for
            # TLS.
            hs.config.read_certificate_from_disk()
            hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
            hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
                config
            )
            _base.start(hs, config.listeners)

            # It is now safe to start your Synapse.
            hs.start_listening()
            hs.get_pusherpool().start()
            hs.get_datastore().start_profiling()
            hs.get_datastore().start_doing_background_updates()
        except Exception as e:
            # If a DeferredList failed (like in listening on the ACME listener),
            # we need to print the subfailure explicitly.
            if isinstance(e, defer.FirstError):
                e.subFailure.printTraceback(sys.stderr)
                sys.exit(1)
        except Exception:
            # Print the exception and bail out.
            print("Error during startup:", file=sys.stderr)

            # Something else went wrong when starting. Print it and bail out.
            traceback.print_exc(file=sys.stderr)
            # this gives better tracebacks than traceback.print_exc()
            Failure().printTraceback(file=sys.stderr)

            if reactor.running:
                reactor.stop()
            sys.exit(1)

    reactor.callWhenRunning(start)
@@ -455,7 +456,8 @@ def setup(config_options):


class SynapseService(service.Service):
    """A twisted Service class that will start synapse. Used to run synapse
    """
    A twisted Service class that will start synapse. Used to run synapse
    via twistd and a .tac.
    """
    def __init__(self, config):
@@ -553,6 +555,9 @@ def run(hs):
            stats["memory_rss"] += process.memory_info().rss
            stats["cpu_average"] += int(process.cpu_percent(interval=None))

        stats["database_engine"] = hs.get_datastore().database_engine_name
        stats["database_server_version"] = hs.get_datastore().get_server_version()

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
@@ -26,7 +26,6 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
@@ -160,17 +159,7 @@ def start(config_options):
    )

    ss.setup()

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-media-repository", config)
@@ -224,11 +224,10 @@ def start(config_options):
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        _base.start(ps, config.worker_listeners)
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
@@ -445,12 +445,7 @@ def start(config_options):
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-synchrotron", config)
@@ -26,7 +26,6 @@ from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
@@ -220,17 +219,7 @@ def start(config_options):
    )

    ss.setup()

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    reactor.callWhenRunning(_base.start, ss, config.worker_listeners)

    _base.start_worker_reactor("synapse-user-dir", config)
@@ -257,7 +257,7 @@ class Config(object):
            "--keys-directory",
            metavar="DIRECTORY",
            help="Used with 'generate-*' options to specify where files such as"
                 " certs and signing keys should be stored in, unless explicitly"
                 " signing keys should be stored, unless explicitly"
                 " specified in the config.",
        )
        config_parser.add_argument(
@@ -313,16 +313,11 @@ class Config(object):
                print(
                    (
                        "A config file has been generated in %r for server name"
                        " %r with corresponding SSL keys and self-signed"
                        " certificates. Please review this file and customise it"
                        " %r. Please review this file and customise it"
                        " to your needs."
                    )
                    % (config_path, server_name)
                )
                print(
                    "If this server name is incorrect, you will need to"
                    " regenerate the SSL certificates"
                )
                return
            else:
                print(
@@ -24,6 +24,7 @@ class ApiConfig(Config):
            EventTypes.JoinRules,
            EventTypes.CanonicalAlias,
            EventTypes.RoomAvatar,
            EventTypes.RoomEncryption,
            EventTypes.Name,
        ])

@@ -32,9 +33,11 @@ class ApiConfig(Config):
        ## API Configuration ##

        # A list of event types that will be included in the room_invite_state
        #
        room_invite_state_types:
            - "{JoinRules}"
            - "{CanonicalAlias}"
            - "{RoomAvatar}"
            - "{RoomEncryption}"
            - "{Name}"
        """.format(**vars(EventTypes))
@@ -38,10 +38,12 @@ class AppServiceConfig(Config):
    def default_config(cls, **kwargs):
        return """\
        # A list of application service config file to use
        #
        app_service_config_files: []

        # Whether or not to track application service IP addresses. Implicitly
        # enables MAU tracking for application service users.
        #
        track_appservice_user_ips: False
        """
@@ -30,19 +30,22 @@ class CaptchaConfig(Config):
        # See docs/CAPTCHA_SETUP for full details of configuring this.

        # This Home Server's ReCAPTCHA public key.
        #
        recaptcha_public_key: "YOUR_PUBLIC_KEY"

        # This Home Server's ReCAPTCHA private key.
        #
        recaptcha_private_key: "YOUR_PRIVATE_KEY"

        # Enables ReCaptcha checks when registering, preventing signup
        # unless a captcha is answered. Requires a valid ReCaptcha
        # public/private key.
        #
        enable_registration_captcha: False

        # A secret key used to bypass the captcha test entirely.
        #captcha_bypass_secret: "YOUR_SECRET_HERE"

        # The API endpoint to use for verifying m.login.recaptcha responses.
        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
        recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
        """
@@ -38,6 +38,7 @@ class CasConfig(Config):
    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable CAS for registration and login.
        #
        #cas_config:
        #   enabled: true
        #   server_url: "https://cas-server.com"
@@ -54,20 +54,20 @@ DEFAULT_CONFIG = """\
# for an account. Has no effect unless `require_at_registration` is enabled.
# Defaults to "Privacy Policy".
#
# user_consent:
#   template_dir: res/templates/privacy
#   version: 1.0
#   server_notice_content:
#     msgtype: m.text
#     body: >-
#       To continue using this homeserver you must review and agree to the
#       terms and conditions at %(consent_uri)s
#   send_server_notice_to_guests: True
#   block_events_error: >-
#     To continue using this homeserver you must review and agree to the
#     terms and conditions at %(consent_uri)s
#   require_at_registration: False
#   policy_name: Privacy Policy
#user_consent:
#  template_dir: res/templates/privacy
#  version: 1.0
#  server_notice_content:
#    msgtype: m.text
#    body: >-
#      To continue using this homeserver you must review and agree to the
#      terms and conditions at %(consent_uri)s
#  send_server_notice_to_guests: True
#  block_events_error: >-
#    To continue using this homeserver you must review and agree to the
#    terms and conditions at %(consent_uri)s
#  require_at_registration: False
#  policy_name: Privacy Policy
#
"""
@@ -24,9 +24,11 @@ class GroupsConfig(Config):
    def default_config(self, **kwargs):
        return """\
        # Whether to allow non server admins to create groups on this server
        #
        enable_group_creation: false

        # If enabled, non server admins can only create groups with local parts
        # starting with this prefix
        # group_creation_prefix: "unofficial/"
        #
        #group_creation_prefix: "unofficial/"
        """
@@ -42,7 +42,7 @@ from .voip import VoipConfig
from .workers import WorkerConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig,
                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
@@ -46,8 +46,8 @@ class JWTConfig(Config):
        return """\
        # The JWT needs to contain a globally unique "sub" (subject) claim.
        #
        # jwt_config:
        #    enabled: true
        #    secret: "a secret"
        #    algorithm: "HS256"
        #jwt_config:
        #   enabled: true
        #   secret: "a secret"
        #   algorithm: "HS256"
        """
@@ -40,7 +40,7 @@ class KeyConfig(Config):
    def read_config(self, config):
        self.signing_key = self.read_signing_key(config["signing_key_path"])
        self.old_signing_keys = self.read_old_signing_keys(
            config["old_signing_keys"]
            config.get("old_signing_keys", {})
        )
        self.key_refresh_interval = self.parse_duration(
            config["key_refresh_interval"]
@@ -56,7 +56,7 @@ class KeyConfig(Config):
        if not self.macaroon_secret_key:
            # Unfortunately, there are people out there that don't have this
            # set. Lets just be "nice" and derive one from their secret key.
            logger.warn("Config is missing missing macaroon_secret_key")
            logger.warn("Config is missing macaroon_secret_key")
            seed = bytes(self.signing_key[0])
            self.macaroon_secret_key = hashlib.sha256(seed).digest()

@@ -83,24 +83,29 @@ class KeyConfig(Config):
        # a secret which is used to sign access tokens. If none is specified,
        # the registration_shared_secret is used, if one is given; otherwise,
        # a secret key is derived from the signing key.
        #
        %(macaroon_secret_key)s

        # Used to enable access token expiration.
        #
        expire_access_token: False

        # a secret which is used to calculate HMACs for form values, to stop
        # falsification of values. Must be specified for the User Consent
        # forms to work.
        #
        %(form_secret)s

        ## Signing Keys ##

        # Path to the signing key to sign messages with
        #
        signing_key_path: "%(base_key_name)s.signing.key"

        # The keys that the server used to sign messages with but won't use
        # to sign new messages. E.g. it has lost its private key
        old_signing_keys: {}
        #
        #old_signing_keys:
        #  "ed25519:auto":
        #    # Base64 encoded public key
        #    key: "The public part of your old signing key."
@@ -111,9 +116,11 @@ class KeyConfig(Config):
        # Used to set the valid_until_ts in /key/v2 APIs.
        # Determines how quickly servers will query to check which keys
        # are still valid.
        #
        key_refresh_interval: "1d" # 1 Day.

        # The trusted servers to download signing keys from.
        #
        perspectives:
          servers:
            "matrix.org":
@@ -15,7 +15,6 @@
import logging
import logging.config
import os
import signal
import sys
from string import Template

@@ -24,6 +23,7 @@ import yaml
from twisted.logger import STDLibLogObserver, globalLogBeginner

import synapse
from synapse.app import _base as appbase
from synapse.util.logcontext import LoggingContextFilter
from synapse.util.versionstring import get_version_string

@@ -83,6 +83,7 @@ class LoggingConfig(Config):
        log_config = os.path.join(config_dir_path, server_name + ".log.config")
        return """
        # A yaml python logging config file
        #
        log_config: "%(log_config)s"
        """ % locals()

@@ -127,7 +128,7 @@ class LoggingConfig(Config):
    )


def setup_logging(config, use_worker_options=False, register_sighup=None):
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
@@ -140,12 +141,6 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
        register_sighup (func | None): Function to call to register a
            sighup handler.
    """
    if not register_sighup:
        if getattr(signal, "SIGHUP"):
            register_sighup = lambda x: signal.signal(signal.SIGHUP, x)
        else:
            register_sighup = lambda x: None

    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
@@ -187,7 +182,7 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
    else:
        handler = logging.StreamHandler()

        def sighup(signum, stack):
        def sighup(*args):
            pass

    handler.setFormatter(formatter)
@@ -200,14 +195,14 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
        with open(log_config, 'r') as f:
            logging.config.dictConfig(yaml.load(f))

        def sighup(signum, stack):
        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()

    register_sighup(sighup)
    appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
@@ -248,3 +243,5 @@ def setup_logging(config, use_worker_options=False, register_sighup=None):
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
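The change above replaces the per-caller register_sighup argument with a central appbase.register_sighup. A sketch of the registry pattern that implies; the names mirror the diff but the body is an assumption, not the actual synapse.app._base code:

import signal

_sighup_callbacks = []

def register_sighup(func):
    """Remember a callback to run when the process receives SIGHUP."""
    _sighup_callbacks.append(func)

def _handle_sighup(signum, frame):
    for func in _sighup_callbacks:
        func(signum, frame)

if getattr(signal, "SIGHUP", None) is not None:  # SIGHUP is absent on Windows
    signal.signal(signal.SIGHUP, _handle_sighup)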
@@ -13,7 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from ._base import Config, ConfigError

MISSING_SENTRY = (
    """Missing sentry-sdk library. This is required to enable sentry
    integration.
    """
)


class MetricsConfig(Config):
@@ -23,12 +29,38 @@ class MetricsConfig(Config):
        self.metrics_port = config.get("metrics_port")
        self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")

        self.sentry_enabled = "sentry" in config
        if self.sentry_enabled:
            try:
                import sentry_sdk  # noqa F401
            except ImportError:
                raise ConfigError(MISSING_SENTRY)

            self.sentry_dsn = config["sentry"].get("dsn")
            if not self.sentry_dsn:
                raise ConfigError(
                    "sentry.dsn field is required when sentry integration is enabled",
                )

    def default_config(self, report_stats=None, **kwargs):
        res = """\
        ## Metrics ###

        # Enable collection and rendering of performance metrics
        #
        enable_metrics: False

        # Enable sentry integration
        # NOTE: While attempts are made to ensure that the logs don't contain
        # any sensitive information, this cannot be guaranteed. By enabling
        # this option the sentry server may therefore receive sensitive
        # information, and it in turn may then disseminate sensitive information
        # through insecure notification channels if so configured.
        #
        #sentry:
        #    dsn: "..."

        # Whether or not to report anonymized homeserver usage statistics.
        """

        if report_stats is None:
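Once the DSN has been validated as above, enabling the integration boils down to initialising the SDK. A minimal sketch with a placeholder DSN; sentry_sdk.init is the library's real entry point:

import sentry_sdk

sentry_dsn = "https://public_key@sentry.example.com/1"  # placeholder
sentry_sdk.init(dsn=sentry_dsn)
sentry_sdk.capture_message("synapse started")  # events now flow to sentry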
@@ -28,6 +28,7 @@ class PasswordConfig(Config):
    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable password for login.
        #
        password_config:
           enabled: true
           # Uncomment and change to a secret random string for extra security.

@@ -52,18 +52,18 @@ class PasswordAuthProviderConfig(Config):

    def default_config(self, **kwargs):
        return """\
        # password_providers:
        #     - module: "ldap_auth_provider.LdapAuthProvider"
        #       config:
        #         enabled: true
        #         uri: "ldap://ldap.example.com:389"
        #         start_tls: true
        #         base: "ou=users,dc=example,dc=com"
        #         attributes:
        #            uid: "cn"
        #            mail: "email"
        #            name: "givenName"
        #         #bind_dn:
        #         #bind_password:
        #         #filter: "(objectClass=posixAccount)"
        #password_providers:
        #  - module: "ldap_auth_provider.LdapAuthProvider"
        #    config:
        #      enabled: true
        #      uri: "ldap://ldap.example.com:389"
        #      start_tls: true
        #      base: "ou=users,dc=example,dc=com"
        #      attributes:
        #         uid: "cn"
        #         mail: "email"
        #         name: "givenName"
        #      #bind_dn:
        #      #bind_password:
        #      #filter: "(objectClass=posixAccount)"
        """
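A password provider such as the LDAP module above is a class that Synapse constructs from the `module`/`config` pair and consults during login. The skeleton below follows the password-provider convention of the era (Twisted deferreds); treat the exact interface as a sketch rather than a contract:

from twisted.internet import defer

class ExamplePasswordProvider(object):
    def __init__(self, config, account_handler):
        self.account_handler = account_handler

    @staticmethod
    def parse_config(config):
        return config

    @defer.inlineCallbacks
    def check_password(self, user_id, password):
        # a real provider would consult LDAP or another backend here
        ok = password == "correct horse battery staple"
        yield defer.succeed(None)
        defer.returnValue(ok)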
@@ -51,11 +51,11 @@ class PushConfig(Config):
        # notification request includes the content of the event (other details
        # like the sender are still included). For `event_id_only` push, it
        # has no effect.

        #
        # For modern android devices the notification content will still appear
        # because it is loaded by the app. iPhone, however, will send a
        # notification saying only that a message arrived and who it came from.
        #
        #push:
        #  include_content: true
        #   include_content: true
        """

@@ -32,27 +32,34 @@ class RatelimitConfig(Config):
        ## Ratelimiting ##

        # Number of messages a client can send per second
        #
        rc_messages_per_second: 0.2

        # Number of messages a client can send before being throttled
        #
        rc_message_burst_count: 10.0

        # The federation window size in milliseconds
        #
        federation_rc_window_size: 1000

        # The number of federation requests from a single server in a window
        # before the server will delay processing the request.
        #
        federation_rc_sleep_limit: 10

        # The duration in milliseconds to delay processing events from
        # remote servers by if they go over the sleep limit.
        #
        federation_rc_sleep_delay: 500

        # The maximum number of concurrent federation requests allowed
        # from a single server
        #
        federation_rc_reject_limit: 50

        # The number of federation requests to concurrently process from a
        # single server
        #
        federation_rc_concurrent: 3
        """
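rc_messages_per_second and rc_message_burst_count describe token-bucket arithmetic: tokens refill at `rate` per second up to `burst`, and each message spends one token. A self-contained sketch of that calculation (illustrative, not Synapse's ratelimiter):

import time

class MessageRatelimiter(object):
    def __init__(self, rate=0.2, burst=10.0):
        self.rate, self.burst = rate, burst
        self.tokens, self.last = burst, time.time()

    def allow(self):
        now = time.time()
        # refill proportionally to elapsed time, capped at the burst size
        self.tokens = min(self.burst, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False

limiter = MessageRatelimiter()
print(sum(limiter.allow() for _ in range(20)))  # ~10: the burst allowance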
@@ -70,28 +70,29 @@ class RegistrationConfig(Config):

        # The user must provide all of the below types of 3PID when registering.
        #
        # registrations_require_3pid:
        #     - email
        #     - msisdn
        #registrations_require_3pid:
        #  - email
        #  - msisdn

        # Explicitly disable asking for MSISDNs from the registration
        # flow (overrides registrations_require_3pid if MSISDNs are set as required)
        #
        # disable_msisdn_registration = True
        #disable_msisdn_registration: True

        # Mandate that users are only allowed to associate certain formats of
        # 3PIDs with accounts on this server.
        #
        # allowed_local_3pids:
        #     - medium: email
        #       pattern: '.*@matrix\\.org'
        #     - medium: email
        #       pattern: '.*@vector\\.im'
        #     - medium: msisdn
        #       pattern: '\\+44'
        #allowed_local_3pids:
        #  - medium: email
        #    pattern: '.*@matrix\\.org'
        #  - medium: email
        #    pattern: '.*@vector\\.im'
        #  - medium: msisdn
        #    pattern: '\\+44'

        # If set, allows registration by anyone who also has the shared
        # secret, even if registration is otherwise disabled.
        #
        %(registration_shared_secret)s

        # Set the number of bcrypt rounds used to generate password hash.
@@ -99,11 +100,13 @@ class RegistrationConfig(Config):
        # The default number is 12 (which equates to 2^12 rounds).
        # N.B. that increasing this will exponentially increase the time required
        # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
        #
        bcrypt_rounds: 12

        # Allows users to register as guests without a password/email/etc, and
        # participate in rooms hosted on this server which have been made
        # accessible to anonymous users.
        #
        allow_guest_access: False

        # The identity server which we suggest that clients should use when users log
@@ -112,27 +115,30 @@ class RegistrationConfig(Config):
        # (By default, no suggestion is made, so it is left up to the client.
        # This setting is ignored unless public_baseurl is also set.)
        #
        # default_identity_server: https://matrix.org
        #default_identity_server: https://matrix.org

        # The list of identity servers trusted to verify third party
        # identifiers by this server.
        #
        # Also defines the ID server which will be called when an account is
        # deactivated (one will be picked arbitrarily).
        #
        trusted_third_party_id_servers:
            - matrix.org
            - vector.im
          - matrix.org
          - vector.im

        # Users who register on this homeserver will automatically be joined
        # to these rooms
        #
        #auto_join_rooms:
        #  - "#example:example.com"
        #   - "#example:example.com"

        # Where auto_join_rooms are specified, setting this flag ensures that
        # the rooms exist by creating them when the first user on the
        # homeserver registers.
        # Setting to false means that if the rooms are not manually created,
        # users cannot be auto-joined since they do not exist.
        #
        autocreate_auto_join_rooms: true
        """ % locals()

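The "exponentially increase" warning about bcrypt_rounds can be demonstrated directly: each extra round doubles the work factor. A small timing sketch using the `bcrypt` package (which Synapse relies on for password hashing):

import time
import bcrypt

password = b"s3cret"
for rounds in (10, 11, 12):
    start = time.time()
    bcrypt.hashpw(password, bcrypt.gensalt(rounds=rounds))
    print(rounds, "%.2fs" % (time.time() - start))  # each step roughly doubles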
@@ -180,29 +180,34 @@ class ContentRepositoryConfig(Config):
        uploads_path = os.path.join(data_dir_path, "uploads")
        return r"""
        # Directory where uploaded images and attachments are stored.
        #
        media_store_path: "%(media_store)s"

        # Media storage providers allow media to be stored in different
        # locations.
        # media_storage_providers:
        # - module: file_system
        #   # Whether to write new local files.
        #   store_local: false
        #   # Whether to write new remote media
        #   store_remote: false
        #   # Whether to block upload requests waiting for write to this
        #   # provider to complete
        #   store_synchronous: false
        #   config:
        #     directory: /mnt/some/other/directory
        #
        #media_storage_providers:
        #  - module: file_system
        #    # Whether to write new local files.
        #    store_local: false
        #    # Whether to write new remote media
        #    store_remote: false
        #    # Whether to block upload requests waiting for write to this
        #    # provider to complete
        #    store_synchronous: false
        #    config:
        #       directory: /mnt/some/other/directory

        # Directory where in-progress uploads are stored.
        #
        uploads_path: "%(uploads_path)s"

        # The largest allowed upload size in bytes
        #
        max_upload_size: "10M"

        # Maximum number of pixels that will be thumbnailed
        #
        max_image_pixels: "32M"

        # Whether to generate new thumbnails on the fly to precisely match
@@ -210,9 +215,11 @@ class ContentRepositoryConfig(Config):
        # a new resolution is requested by the client the server will
        # generate a new thumbnail. If false the server will pick a thumbnail
        # from a precalculated list.
        #
        dynamic_thumbnails: false

        # List of thumbnail to precalculate when an image is uploaded.
        # List of thumbnails to precalculate when an image is uploaded.
        #
        thumbnail_sizes:
        - width: 32
          height: 32
@@ -233,6 +240,7 @@ class ContentRepositoryConfig(Config):
        # Is the preview URL API enabled?  If enabled, you *must* specify
        # an explicit url_preview_ip_range_blacklist of IPs that the spider is
        # denied from accessing.
        #
        url_preview_enabled: False

        # List of IP address CIDR ranges that the URL preview spider is denied
@@ -243,16 +251,16 @@ class ContentRepositoryConfig(Config):
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # url_preview_ip_range_blacklist:
        # - '127.0.0.0/8'
        # - '10.0.0.0/8'
        # - '172.16.0.0/12'
        # - '192.168.0.0/16'
        # - '100.64.0.0/10'
        # - '169.254.0.0/16'
        # - '::1/128'
        # - 'fe80::/64'
        # - 'fc00::/7'
        #url_preview_ip_range_blacklist:
        #  - '127.0.0.0/8'
        #  - '10.0.0.0/8'
        #  - '172.16.0.0/12'
        #  - '192.168.0.0/16'
        #  - '100.64.0.0/10'
        #  - '169.254.0.0/16'
        #  - '::1/128'
        #  - 'fe80::/64'
        #  - 'fc00::/7'
        #
        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
@@ -260,8 +268,8 @@ class ContentRepositoryConfig(Config):
        # target IP ranges - e.g. for enabling URL previews for a specific private
        # website only visible in your network.
        #
        # url_preview_ip_range_whitelist:
        # - '192.168.1.1'
        #url_preview_ip_range_whitelist:
        #   - '192.168.1.1'

        # Optional list of URL matches that the URL preview spider is
        # denied from accessing.  You should use url_preview_ip_range_blacklist
@@ -279,26 +287,25 @@ class ContentRepositoryConfig(Config):
        # specified component matches for a given list item succeed, the URL is
        # blacklisted.
        #
        # url_preview_url_blacklist:
        # # blacklist any URL with a username in its URI
        # - username: '*'
        #url_preview_url_blacklist:
        #  # blacklist any URL with a username in its URI
        #  - username: '*'
        #
        # # blacklist all *.google.com URLs
        # - netloc: 'google.com'
        # - netloc: '*.google.com'
        #  # blacklist all *.google.com URLs
        #  - netloc: 'google.com'
        #  - netloc: '*.google.com'
        #
        # # blacklist all plain HTTP URLs
        # - scheme: 'http'
        #  # blacklist all plain HTTP URLs
        #  - scheme: 'http'
        #
        # # blacklist http(s)://www.acme.com/foo
        # - netloc: 'www.acme.com'
        #   path: '/foo'
        #  # blacklist http(s)://www.acme.com/foo
        #  - netloc: 'www.acme.com'
        #    path: '/foo'
        #
        # # blacklist any URL with a literal IPv4 address
        # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
        #  # blacklist any URL with a literal IPv4 address
        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

        # The largest allowed URL preview spidering size in bytes
        max_spider_size: "10M"

        """ % locals()
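The sizes above ("10M", "32M") are human-readable byte counts parsed by a helper on the Config base class. A minimal parser in that spirit, assuming K and M suffixes; illustrative, not the actual implementation:

SIZE_SUFFIXES = {"K": 1024, "M": 1024 * 1024}

def parse_size(value):
    """Convert "10M"-style strings (or bare ints) to a byte count."""
    if isinstance(value, int):
        return value
    suffix = value[-1].upper()
    if suffix in SIZE_SUFFIXES:
        return int(value[:-1]) * SIZE_SUFFIXES[suffix]
    return int(value)

print(parse_size("10M"))  # 10485760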
@@ -20,12 +20,37 @@ from ._base import Config, ConfigError

class RoomDirectoryConfig(Config):
    def read_config(self, config):
        alias_creation_rules = config["alias_creation_rules"]
        alias_creation_rules = config.get("alias_creation_rules")

        self._alias_creation_rules = [
            _AliasRule(rule)
            for rule in alias_creation_rules
        ]
        if alias_creation_rules is not None:
            self._alias_creation_rules = [
                _RoomDirectoryRule("alias_creation_rules", rule)
                for rule in alias_creation_rules
            ]
        else:
            self._alias_creation_rules = [
                _RoomDirectoryRule(
                    "alias_creation_rules", {
                        "action": "allow",
                    }
                )
            ]

        room_list_publication_rules = config.get("room_list_publication_rules")

        if room_list_publication_rules is not None:
            self._room_list_publication_rules = [
                _RoomDirectoryRule("room_list_publication_rules", rule)
                for rule in room_list_publication_rules
            ]
        else:
            self._room_list_publication_rules = [
                _RoomDirectoryRule(
                    "room_list_publication_rules", {
                        "action": "allow",
                    }
                )
            ]

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
@@ -33,60 +58,138 @@ class RoomDirectoryConfig(Config):
        # on this server.
        #
        # The format of this option is a list of rules that contain globs that
        # match against user_id and the new alias (fully qualified with server
        # name). The action in the first rule that matches is taken, which can
        # currently either be "allow" or "deny".
        # match against user_id, room_id and the new alias (fully qualified with
        # server name). The action in the first rule that matches is taken,
        # which can currently either be "allow" or "deny".
        #
        # If no rules match the request is denied.
        alias_creation_rules:
            - user_id: "*"
              alias: "*"
              action: allow
        # Missing user_id/room_id/alias fields default to "*".
        #
        # If no rules match the request is denied. An empty list means no one
        # can create aliases.
        #
        # Options for the rules include:
        #
        #   user_id: Matches against the creator of the alias
        #   alias: Matches against the alias being created
        #   room_id: Matches against the room ID the alias is being pointed at
        #   action: Whether to "allow" or "deny" the request if the rule matches
        #
        # The default is:
        #
        #alias_creation_rules:
        #  - user_id: "*"
        #    alias: "*"
        #    room_id: "*"
        #    action: allow

        # The `room_list_publication_rules` option controls who can publish and
        # which rooms can be published in the public room list.
        #
        # The format of this option is the same as that for
        # `alias_creation_rules`.
        #
        # If the room has one or more aliases associated with it, only one of
        # the aliases needs to match the alias rule. If there are no aliases
        # then only rules with `alias: *` match.
        #
        # If no rules match the request is denied. An empty list means no one
        # can publish rooms.
        #
        # Options for the rules include:
        #
        #   user_id: Matches against the creator of the alias
        #   room_id: Matches against the room ID being published
        #   alias: Matches against any current local or canonical aliases
        #            associated with the room
        #   action: Whether to "allow" or "deny" the request if the rule matches
        #
        # The default is:
        #
        #room_list_publication_rules:
        #  - user_id: "*"
        #    alias: "*"
        #    room_id: "*"
        #    action: allow
        """
    def is_alias_creation_allowed(self, user_id, alias):
    def is_alias_creation_allowed(self, user_id, room_id, alias):
        """Checks if the given user is allowed to create the given alias

        Args:
            user_id (str)
            room_id (str)
            alias (str)

        Returns:
            boolean: True if user is allowed to create the alias
        """
        for rule in self._alias_creation_rules:
            if rule.matches(user_id, alias):
            if rule.matches(user_id, room_id, [alias]):
                return rule.action == "allow"

        return False

    def is_publishing_room_allowed(self, user_id, room_id, aliases):
        """Checks if the given user is allowed to publish the room

        Args:
            user_id (str)
            room_id (str)
            aliases (list[str]): any local aliases associated with the room

        Returns:
            boolean: True if user can publish room
        """
        for rule in self._room_list_publication_rules:
            if rule.matches(user_id, room_id, aliases):
                return rule.action == "allow"

        return False


class _AliasRule(object):
    def __init__(self, rule):
class _RoomDirectoryRule(object):
    """Helper class to test whether a room directory action is allowed, like
    creating an alias or publishing a room.
    """

    def __init__(self, option_name, rule):
        """
        Args:
            option_name (str): Name of the config option this rule belongs to
            rule (dict): The rule as specified in the config
        """

        action = rule["action"]
        user_id = rule["user_id"]
        alias = rule["alias"]
        user_id = rule.get("user_id", "*")
        room_id = rule.get("room_id", "*")
        alias = rule.get("alias", "*")

        if action in ("allow", "deny"):
            self.action = action
        else:
            raise ConfigError(
                "alias_creation_rules rules can only have action of 'allow'"
                " or 'deny'"
                "%s rules can only have action of 'allow'"
                " or 'deny'" % (option_name,)
            )

        self._alias_matches_all = alias == "*"

        try:
            self._user_id_regex = glob_to_regex(user_id)
            self._alias_regex = glob_to_regex(alias)
            self._room_id_regex = glob_to_regex(room_id)
        except Exception as e:
            raise ConfigError("Failed to parse glob into regex: %s", e)

    def matches(self, user_id, alias):
        """Tests if this rule matches the given user_id and alias.
    def matches(self, user_id, room_id, aliases):
        """Tests if this rule matches the given user_id, room_id and aliases.

        Args:
            user_id (str)
            alias (str)
            room_id (str)
            aliases (list[str]): The associated aliases to the room. Will be a
                single element for testing alias creation, and can be empty for
                testing room publishing.

        Returns:
            boolean
@@ -96,7 +199,22 @@ class _AliasRule(object):
        if not self._user_id_regex.match(user_id):
            return False

        if not self._alias_regex.match(alias):
        if not self._room_id_regex.match(room_id):
            return False

        return True
        # We only have alias checks left, so we can short circuit if the alias
        # rule matches everything.
        if self._alias_matches_all:
            return True

        # If we are not given any aliases then this rule only matches if the
        # alias glob matches all aliases, which we checked above.
        if not aliases:
            return False

        # Otherwise, we just need one alias to match
        for alias in aliases:
            if self._alias_regex.match(alias):
                return True

        return False
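glob_to_regex is a Synapse utility; a close stand-in can be built from the standard library, which also makes the first-match-wins evaluation described above easy to try out. The helper and rule values below are illustrative:

import fnmatch
import re

def glob_to_regex(glob):
    # assumption: simple '*'/'?' globs, matched case-insensitively
    return re.compile(fnmatch.translate(glob), re.IGNORECASE)

rules = [
    {"user_id": "@bad:*", "alias": "*", "action": "deny"},
    {"user_id": "*", "alias": "#official-*", "action": "allow"},
]

def is_allowed(user_id, alias):
    for rule in rules:  # first matching rule wins
        if (glob_to_regex(rule["user_id"]).match(user_id)
                and glob_to_regex(rule["alias"]).match(alias)):
            return rule["action"] == "allow"
    return False  # no rule matched: denied

print(is_allowed("@alice:example.com", "#official-chat:example.com"))  # True
print(is_allowed("@bad:example.com", "#official-chat:example.com"))    # False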
@@ -67,44 +67,43 @@ class SAML2Config(Config):
        return """
        # Enable SAML2 for registration and login. Uses pysaml2.
        #
        # saml2_config:
        # `sp_config` is the configuration for the pysaml2 Service Provider.
        # See pysaml2 docs for format of config.
        #
        #   # The following is the configuration for the pysaml2 Service Provider.
        #   # See pysaml2 docs for format of config.
        #   #
        #   # Default values will be used for the 'entityid' and 'service' settings,
        #   # so it is not normally necessary to specify them unless you need to
        #   # override them.
        # Default values will be used for the 'entityid' and 'service' settings,
        # so it is not normally necessary to specify them unless you need to
        # override them.
        #
        #   sp_config:
        #     # point this to the IdP's metadata. You can use either a local file or
        #     # (preferably) a URL.
        #     metadata:
        #       # local: ["saml2/idp.xml"]
        #       remote:
        #         - url: https://our_idp/metadata.xml
        #saml2_config:
        #  sp_config:
        #    # point this to the IdP's metadata. You can use either a local file or
        #    # (preferably) a URL.
        #    metadata:
        #      #local: ["saml2/idp.xml"]
        #      remote:
        #        - url: https://our_idp/metadata.xml
        #
        #       # The following is just used to generate our metadata xml, and you
        #       # may well not need it, depending on your setup. Alternatively you
        #       # may need a whole lot more detail - see the pysaml2 docs!
        #    # The rest of sp_config is just used to generate our metadata xml, and you
        #    # may well not need it, depending on your setup. Alternatively you
        #    # may need a whole lot more detail - see the pysaml2 docs!
        #
        #       description: ["My awesome SP", "en"]
        #       name: ["Test SP", "en"]
        #    description: ["My awesome SP", "en"]
        #    name: ["Test SP", "en"]
        #
        #       organization:
        #         name: Example com
        #         display_name:
        #           - ["Example co", "en"]
        #         url: "http://example.com"
        #    organization:
        #      name: Example com
        #      display_name:
        #        - ["Example co", "en"]
        #      url: "http://example.com"
        #
        #       contact_person:
        #         - given_name: Bob
        #           sur_name: "the Sysadmin"
        #           email_address": ["admin@example.com"]
        #           contact_type": technical
        #    contact_person:
        #      - given_name: Bob
        #        sur_name: "the Sysadmin"
        #        email_address": ["admin@example.com"]
        #        contact_type": technical
        #
        #       # Instead of putting the config inline as above, you can specify a
        #       # separate pysaml2 configuration file:
        #       #
        #       # config_path: "%(config_dir_path)s/sp_conf.py"
        #  # Instead of putting the config inline as above, you can specify a
        #  # separate pysaml2 configuration file:
        #  #
        #  config_path: "%(config_dir_path)s/sp_conf.py"
        """ % {"config_dir_path": config_dir_path}
@@ -24,6 +24,14 @@ from ._base import Config, ConfigError

logger = logging.Logger(__name__)

# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
#
# We later check for errors when binding to 0.0.0.0 and ignore them if :: is also
# in the list.
DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']


class ServerConfig(Config):
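A standalone sketch of the dual-stack strategy that comment describes: bind both '::' and '0.0.0.0', and tolerate the EADDRINUSE that dual-stack kernels raise for the IPv4 wildcard once '::' already covers it. This is an illustration of the idea, not Synapse's Twisted-based listener code:

import errno
import socket

def bind_all(port):
    sockets = []
    for family, address in ((socket.AF_INET6, "::"), (socket.AF_INET, "0.0.0.0")):
        s = socket.socket(family, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((address, port))
            s.listen(5)
            sockets.append(s)
        except OSError as e:
            if e.errno == errno.EADDRINUSE and address == "0.0.0.0":
                s.close()  # '::' already accepts IPv4-mapped connections
            else:
                raise
    return sockets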
@@ -118,16 +126,38 @@ class ServerConfig(Config):
            self.public_baseurl += '/'
        self.start_pushers = config.get("start_pushers", True)

        self.listeners = config.get("listeners", [])
        self.listeners = []
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
                raise ConfigError(
                    "Listener configuration is lacking a valid 'port' option"
                )

            if listener.setdefault("tls", False):
                # no_tls is not really supported any more, but let's grandfather it in
                # here.
                if config.get("no_tls", False):
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls"
                    )
                    continue

        for listener in self.listeners:
            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            # if bind_address was specified, add it to the list of addresses
            if bind_address:
                bind_addresses.append(bind_address)
            elif not bind_addresses:
                bind_addresses.append('')

            # if we still have an empty list of addresses, use the default list
            if not bind_addresses:
                if listener['type'] == 'metrics':
                    # the metrics listener doesn't support IPv6
                    bind_addresses.append('0.0.0.0')
                else:
                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

            self.listeners.append(listener)

        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)
@@ -136,6 +166,9 @@ class ServerConfig(Config):

        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")

            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)
@@ -182,6 +215,7 @@ class ServerConfig(Config):
                "port": manhole,
                "bind_addresses": ["127.0.0.1"],
                "type": "manhole",
                "tls": False,
            })

        metrics_port = config.get("metrics_port")
@@ -207,6 +241,9 @@ class ServerConfig(Config):

        _check_resource_config(self.listeners)

    def has_tls_listener(self):
        return any(l["tls"] for l in self.listeners)

    def default_config(self, server_name, data_dir_path, **kwargs):
        _, bind_port = parse_and_validate_server_name(server_name)
        if bind_port is not None:
@@ -249,19 +286,20 @@ class ServerConfig(Config):
        #
        # This setting requires the affinity package to be installed!
        #
        # cpu_affinity: 0xFFFFFFFF
        #cpu_affinity: 0xFFFFFFFF

        # The path to the web client which will be served at /_matrix/client/
        # if 'webclient' is configured under the 'listeners' configuration.
        #
        # web_client_location: "/path/to/web/root"
        #web_client_location: "/path/to/web/root"

        # The public-facing base URL that clients use to access this HS
        # (not including _matrix/...). This is the same URL a user would
        # enter into the 'custom HS URL' field on their client. If you
        # use synapse with a reverse proxy, this should be the URL to reach
        # synapse via the proxy.
        # public_baseurl: https://example.com/
        #
        #public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use
        # Zero is used to indicate synapse should set the soft limit to the
@@ -272,15 +310,25 @@ class ServerConfig(Config):
        use_presence: true

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        # gc_thresholds: [700, 10, 10]
        #
        #gc_thresholds: [700, 10, 10]

        # Set the limit on the returned events in the timeline in the get
        # and sync operations. The default value is -1, means no upper limit.
        # filter_timeline_limit: 5000
        #
        #filter_timeline_limit: 5000

        # Whether room invites to users on this server should be blocked
        # (except those sent by local server admins). The default is False.
        # block_non_admin_invites: True
        #
        #block_non_admin_invites: True

        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        #
        #enable_search: false

        # Restrict federation to the following whitelist of domains.
        # N.B. we recommend also firewalling your federation listener to limit
@@ -288,117 +336,145 @@ class ServerConfig(Config):
        # purely on this application-layer restriction.  If not specified, the
        # default is to whitelist everything.
        #
        # federation_domain_whitelist:
        #federation_domain_whitelist:
        #  - lon.example.com
        #  - nyc.example.com
        #  - syd.example.com
        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        #
        # Options for each listener include:
        #
        #   port: the TCP port to bind to
        #
        #   bind_addresses: a list of local addresses to listen on. The default is
        #       'all local interfaces'.
        #
        #   type: the type of listener. Normally 'http', but other valid options are:
        #       'manhole' (see docs/manhole.md),
        #       'metrics' (see docs/metrics-howto.rst),
        #       'replication' (see docs/workers.rst).
        #
        #   tls: set to true to enable TLS for this listener. Will use the TLS
        #       key/cert specified in tls_private_key_path / tls_certificate_path.
        #
        #   x_forwarded: Only valid for an 'http' listener. Set to true to use the
        #       X-Forwarded-For header as the client IP. Useful when Synapse is
        #       behind a reverse-proxy.
        #
        #   resources: Only valid for an 'http' listener. A list of resources to host
        #       on this port. Options for each resource are:
        #
        #       names: a list of names of HTTP resources. See below for a list of
        #           valid resource names.
        #
        #       compress: set to true to enable HTTP compression for this resource.
        #
        #   additional_resources: Only valid for an 'http' listener. A map of
        #       additional endpoints which should be loaded via dynamic modules.
        #
        # Valid resource names are:
        #
        #   client: the client-server API (/_matrix/client). Also implies 'media' and
        #       'static'.
        #
        #   consent: user consent forms (/_matrix/consent). See
        #       docs/consent_tracking.md.
        #
        #   federation: the server-server API (/_matrix/federation). Also implies
        #       'media', 'keys', 'openid'
        #
        #   keys: the key discovery API (/_matrix/keys).
        #
        #   media: the media API (/_matrix/media).
        #
        #   metrics: the metrics interface. See docs/metrics-howto.rst.
        #
        #   openid: OpenID authentication.
        #
        #   replication: the HTTP replication API (/_synapse/replication). See
        #       docs/workers.rst.
        #
        #   static: static resources under synapse/static (/_matrix/static). (Mostly
        #       useful for 'fallback authentication'.)
        #
        #   webclient: A web client. Requires web_client_location to be set.
        #
        listeners:
          # Main HTTPS listener
          # For when matrix traffic is sent directly to synapse.
          -
            # The port to listen for HTTPS requests on.
            port: %(bind_port)s
          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
          #
          # Disabled by default. To enable it, uncomment the following. (Note that you
          # will also need to give Synapse a TLS key and certificate: see the TLS section
          # below.)
          #
          #- port: %(bind_port)s
          #  type: http
          #  tls: true
          #  resources:
          #    - names: [client, federation]

            # Local addresses to listen on.
            # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
            # addresses by default. For most other OSes, this will only listen
            # on IPv6.
            bind_addresses:
              - '::'
              - '0.0.0.0'

            # This is a 'http' listener, allows us to specify 'resources'.
            type: http

            tls: true

            # Use the X-Forwarded-For (XFF) header as the client IP and not the
            # actual client IP.
            x_forwarded: false

            # List of HTTP resources to serve on this listener.
            resources:
              -
                # List of resources to host on this listener.
                names:
                  - client     # The client-server APIs, both v1 and v2
                # - webclient  # A web client. Requires web_client_location to be set.

                # Should synapse compress HTTP responses to clients that support it?
                # This should be disabled if running synapse behind a load balancer
                # that can do automatic compression.
                compress: true

              - names: [federation]  # Federation APIs
                compress: false

            # optional list of additional endpoints which can be loaded via
            # dynamic modules
            # additional_resources:
            #   "/_matrix/my/custom/endpoint":
            #     module: my_module.CustomRequestHandler
            #     config: {}

          # Unsecure HTTP listener,
          # For when matrix traffic passes through loadbalancer that unwraps TLS.
          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
          # that unwraps TLS.
          #
          # If you plan to use a reverse proxy, please see
          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
          #
          - port: %(unsecure_port)s
            tls: false
            bind_addresses: ['::', '0.0.0.0']
            bind_addresses: ['::1', '127.0.0.1']
            type: http

            x_forwarded: false
            x_forwarded: true

            resources:
              - names: [client]
                compress: true
              - names: [federation]
              - names: [client, federation]
                compress: false

            # example additional_resources:
            #
            #additional_resources:
            #  "/_matrix/my/custom/endpoint":
            #    module: my_module.CustomRequestHandler
            #    config: {}

          # Turn on the twisted ssh manhole service on localhost on the given
          # port.
          # - port: 9000
          #   bind_addresses: ['::1', '127.0.0.1']
          #   type: manhole
          #
          #- port: 9000
          #  bind_addresses: ['::1', '127.0.0.1']
          #  type: manhole

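The resource names documented above are checked at startup by _check_resource_config (seen earlier in this diff). A sketch of the kind of validation involved; the constant mirrors the KNOWN_RESOURCES tuple that appears later in this listing, while the function body is an assumption:

KNOWN_RESOURCES = {
    "client", "consent", "federation", "keys", "media",
    "metrics", "openid", "replication", "static", "webclient",
}

def check_resource_config(listeners):
    for listener in listeners:
        for res in listener.get("resources", []):
            for name in res.get("names", []):
                if name not in KNOWN_RESOURCES:
                    raise ValueError("Unknown listener resource %r" % (name,))

check_resource_config([{"resources": [{"names": ["client", "federation"]}]}])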
        # Homeserver blocking
        #
        ## Homeserver blocking ##

        # How to reach the server admin, used in ResourceLimitError
        # admin_contact: 'mailto:admin@server.com'
        #
        # Global block config
        #
        # hs_disabled: False
        # hs_disabled_message: 'Human readable reason for why the HS is blocked'
        # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
        #admin_contact: 'mailto:admin@server.com'

        # Global blocking
        #
        #hs_disabled: False
        #hs_disabled_message: 'Human readable reason for why the HS is blocked'
        #hs_disabled_limit_type: 'error code(str), to help clients decode reason'

        # Monthly Active User Blocking
        #
        # Enables monthly active user checking
        # limit_usage_by_mau: False
        # max_mau_value: 50
        # mau_trial_days: 2
        #
        #limit_usage_by_mau: False
        #max_mau_value: 50
        #mau_trial_days: 2

        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        # mau_stats_only: False
        #
        #mau_stats_only: False

        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        # mau_limit_reserved_threepids:
        # - medium: 'email'
        #   address: 'reserved_user@example.com'
        #
        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        # enable_search: true
        #mau_limit_reserved_threepids:
        #  - medium: 'email'
        #    address: 'reserved_user@example.com'
        """ % locals()

    def read_arguments(self, args):
@@ -480,6 +556,7 @@ KNOWN_RESOURCES = (
    'keys',
    'media',
    'metrics',
    'openid',
    'replication',
    'static',
    'webclient',

@@ -30,11 +30,11 @@ DEFAULT_CONFIG = """\
# It's also possible to override the room name, the display name of the
# "notices" user, and the avatar for the user.
#
# server_notices:
#   system_mxid_localpart: notices
#   system_mxid_display_name: "Server Notices"
#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
#   room_name: "Server Notices"
#server_notices:
#  system_mxid_localpart: notices
#  system_mxid_display_name: "Server Notices"
#  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
#  room_name: "Server Notices"
"""

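A sketch of the monthly-active-user gate the limit_usage_by_mau options configure: when the cap is reached, requests fail with the ResourceLimitError mentioned in the admin_contact comment. The storage call here is a hypothetical stand-in:

class ResourceLimitError(Exception):
    pass

def assert_can_use(store, limit_usage_by_mau=False, max_mau_value=50):
    if not limit_usage_by_mau:
        return
    current_mau = store.count_monthly_users()  # hypothetical storage helper
    if current_mau >= max_mau_value:
        raise ResourceLimitError("Monthly Active User limit exceeded")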
@@ -28,8 +28,8 @@ class SpamCheckerConfig(Config):

    def default_config(self, **kwargs):
        return """\
        # spam_checker:
        #     module: "my_custom_project.SuperSpamChecker"
        #     config:
        #         example_option: 'things'
        #spam_checker:
        #  module: "my_custom_project.SuperSpamChecker"
        #  config:
        #    example_option: 'things'
        """
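For orientation, the module named by spam_checker is instantiated with the parsed `config` block and consulted via hooks such as check_event_for_spam, where returning True rejects the event. A skeleton under those assumptions (the event access shown is simplified):

class SuperSpamChecker(object):
    def __init__(self, config):
        self.example_option = config.get("example_option")

    def check_event_for_spam(self, event):
        body = event.get("content", {}).get("body", "")
        return "buy cheap watches" in body  # True means "this is spam"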
@@ -19,13 +19,15 @@ import warnings
from datetime import datetime
from hashlib import sha256

import six

from unpaddedbase64 import encode_base64

from OpenSSL import crypto

from synapse.config._base import Config
from synapse.config._base import Config, ConfigError

logger = logging.getLogger()
logger = logging.getLogger(__name__)


class TlsConfig(Config):
@@ -36,18 +38,37 @@ class TlsConfig(Config):
            acme_config = {}

        self.acme_enabled = acme_config.get("enabled", False)
        self.acme_url = acme_config.get(

        # hyperlink complains on py2 if this is not a Unicode
        self.acme_url = six.text_type(acme_config.get(
            "url", u"https://acme-v01.api.letsencrypt.org/directory"
        )
        ))
        self.acme_port = acme_config.get("port", 80)
        self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
        self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)
        self.acme_domain = acme_config.get("domain", config.get("server_name"))

        self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
        self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
        self._original_tls_fingerprints = config["tls_fingerprints"]

        if self.has_tls_listener():
            if not self.tls_certificate_file:
                raise ConfigError(
                    "tls_certificate_path must be specified if TLS-enabled listeners are "
                    "configured."
                )
            if not self.tls_private_key_file:
                raise ConfigError(
                    "tls_private_key_path must be specified if TLS-enabled listeners are "
                    "configured."
                )

        self._original_tls_fingerprints = config.get("tls_fingerprints", [])

        if self._original_tls_fingerprints is None:
            self._original_tls_fingerprints = []

        self.tls_fingerprints = list(self._original_tls_fingerprints)
        self.no_tls = config.get("no_tls", False)

        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
@@ -60,10 +81,14 @@ class TlsConfig(Config):
        self.tls_certificate = None
        self.tls_private_key = None

    def is_disk_cert_valid(self):
    def is_disk_cert_valid(self, allow_self_signed=True):
        """
        Is the certificate we have on disk valid, and if so, for how long?

        Args:
            allow_self_signed (bool): Should we allow the certificate we
                read to be self signed?

        Returns:
            int: Days remaining of certificate validity.
            None: No certificate exists.
@@ -84,6 +109,12 @@ class TlsConfig(Config):
            logger.exception("Failed to parse existing certificate off disk!")
            raise

        if not allow_self_signed:
            if tls_certificate.get_subject() == tls_certificate.get_issuer():
                raise ValueError(
                    "TLS Certificate is self signed, and this is not permitted"
                )

        # YYYYMMDDhhmmssZ -- in UTC
        expires_on = datetime.strptime(
            tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
@@ -92,36 +123,40 @@ class TlsConfig(Config):
        days_remaining = (expires_on - now).days
        return days_remaining

    def read_certificate_from_disk(self):
        """
        Read the certificates from disk.
        """
        self.tls_certificate = self.read_tls_certificate(self.tls_certificate_file)

        # Check if it is self-signed, and issue a warning if so.
        if self.tls_certificate.get_issuer() == self.tls_certificate.get_subject():
            warnings.warn(
                (
                    "Self-signed TLS certificates will not be accepted by Synapse 1.0. "
                    "Please either provide a valid certificate, or use Synapse's ACME "
                    "support to provision one."
                )
            )

        if not self.no_tls:
            self.tls_private_key = self.read_tls_private_key(self.tls_private_key_file)

    def read_certificate_from_disk(self, require_cert_and_key):
        """
        Read the certificates and private key from disk.

        Args:
            require_cert_and_key (bool): set to True to throw an error if the certificate
                and key file are not given
        """
        if require_cert_and_key:
            self.tls_private_key = self.read_tls_private_key()
            self.tls_certificate = self.read_tls_certificate()
        elif self.tls_certificate_file:
            # we only need the certificate for the tls_fingerprints. Reload it if we
            # can, but it's not a fatal error if we can't.
            try:
                self.tls_certificate = self.read_tls_certificate()
            except Exception as e:
                logger.info(
                    "Unable to read TLS certificate (%s). Ignoring as no "
                    "tls listeners enabled.", e,
                )

        self.tls_fingerprints = list(self._original_tls_fingerprints)

        # Check that our own certificate is included in the list of fingerprints
        # and include it if it is not.
        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1, self.tls_certificate
        )
        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
        if sha256_fingerprint not in sha256_fingerprints:
            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
        if self.tls_certificate:
            # Check that our own certificate is included in the list of fingerprints
            # and include it if it is not.
            x509_certificate_bytes = crypto.dump_certificate(
                crypto.FILETYPE_ASN1, self.tls_certificate
            )
            sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
            sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
            if sha256_fingerprint not in sha256_fingerprints:
                self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
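The expiry computation above can be exercised in isolation: parse the ASN.1 notAfter timestamp (YYYYMMDDhhmmssZ, in UTC) and count the days left. A standalone sketch where cert_pem stands in for the PEM read from tls_certificate_path:

from datetime import datetime
from OpenSSL import crypto

def days_remaining(cert_pem):
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
    expires_on = datetime.strptime(
        cert.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
    )
    return (expires_on - datetime.utcnow()).days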
    def default_config(self, config_dir_path, server_name, **kwargs):
        base_key_name = os.path.join(config_dir_path, server_name)
@@ -137,6 +172,8 @@ class TlsConfig(Config):

        return (
            """\
        ## TLS ##

        # PEM-encoded X509 certificate for TLS.
        # This certificate, as of Synapse 1.0, will need to be a valid and verifiable
        # certificate, signed by a recognised Certificate Authority.
@@ -144,10 +181,11 @@ class TlsConfig(Config):
        # See 'ACME support' below to enable auto-provisioning this certificate via
        # Let's Encrypt.
        #
        tls_certificate_path: "%(tls_certificate_path)s"
        #tls_certificate_path: "%(tls_certificate_path)s"

        # PEM-encoded private key for TLS
        tls_private_key_path: "%(tls_private_key_path)s"
        #
        #tls_private_key_path: "%(tls_private_key_path)s"

        # ACME support: This will configure Synapse to request a valid TLS certificate
        # for your configured `server_name` via Let's Encrypt.
@@ -172,37 +210,44 @@ class TlsConfig(Config):
        #
        acme:
            # ACME support is disabled by default. Uncomment the following line
            # to enable it.
            # (and tls_certificate_path and tls_private_key_path above) to enable it.
            #
            # enabled: true
            #enabled: true

            # Endpoint to use to request certificates. If you only want to test,
            # use Let's Encrypt's staging url:
            #     https://acme-staging.api.letsencrypt.org/directory
            #
            # url: https://acme-v01.api.letsencrypt.org/directory
            #url: https://acme-v01.api.letsencrypt.org/directory

            # Port number to listen on for the HTTP-01 challenge. Change this if
            # you are forwarding connections through Apache/Nginx/etc.
            #
            # port: 80
            #port: 80

            # Local addresses to listen on for incoming connections.
            # Again, you may want to change this if you are forwarding connections
            # through Apache/Nginx/etc.
            #
            # bind_addresses: ['::', '0.0.0.0']
            #bind_addresses: ['::', '0.0.0.0']

            # How many days remaining on a certificate before it is renewed.
            #
            # reprovision_threshold: 30
            #reprovision_threshold: 30

        # If your server runs behind a reverse-proxy which terminates TLS connections
        # (for both client and federation connections), it may be useful to disable
        # All TLS support for incoming connections. Setting no_tls to True will
        # do so (and avoid the need to give synapse a TLS private key).
        #
        # no_tls: True
            # The domain that the certificate should be for. Normally this
            # should be the same as your Matrix domain (i.e., 'server_name'), but,
            # by putting a file at 'https://<server_name>/.well-known/matrix/server',
            # you can delegate incoming traffic to another server. If you do that,
            # you should give the target of the delegation here.
            #
            # For example: if your 'server_name' is 'example.com', but
            # 'https://example.com/.well-known/matrix/server' delegates to
            # 'matrix.example.com', you should put 'matrix.example.com' here.
            #
            # If not set, defaults to your 'server_name'.
            #
            #domain: matrix.example.com

        # List of allowed TLS fingerprints for this server to publish along
        # with the signing keys for this server. Other matrix servers that
@@ -229,17 +274,44 @@ class TlsConfig(Config):
        #   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
        # or by checking matrix.org/federationtester/api/report?server_name=$host
        #
        tls_fingerprints: []
        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        #tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]

        """
            % locals()
        )
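The fingerprint format published alongside the signing keys matches the openssl one-liner in the comments above: DER-encode the certificate, SHA-256 it, and base64-encode without padding. A compact sketch, where cert is an OpenSSL.crypto.X509 as loaded elsewhere in this file:

from hashlib import sha256
from OpenSSL import crypto
from unpaddedbase64 import encode_base64

def fingerprint_entry(cert):
    der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
    return {"sha256": encode_base64(sha256(der_bytes).digest())}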
    def read_tls_certificate(self, cert_path):
        cert_pem = self.read_file(cert_path, "tls_certificate")
        return crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)

    def read_tls_private_key(self, private_key_path):
        private_key_pem = self.read_file(private_key_path, "tls_private_key")

    def read_tls_certificate(self):
        """Reads the TLS certificate from the configured file, and returns it

        Also checks if it is self-signed, and warns if so

        Returns:
            OpenSSL.crypto.X509: the certificate
        """
        cert_path = self.tls_certificate_file
        logger.info("Loading TLS certificate from %s", cert_path)
        cert_pem = self.read_file(cert_path, "tls_certificate_path")
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)

        # Check if it is self-signed, and issue a warning if so.
        if cert.get_issuer() == cert.get_subject():
            warnings.warn(
                (
                    "Self-signed TLS certificates will not be accepted by Synapse 1.0. "
                    "Please either provide a valid certificate, or use Synapse's ACME "
                    "support to provision one."
                )
            )

        return cert

    def read_tls_private_key(self):
        """Reads the TLS private key from the configured file, and returns it

        Returns:
            OpenSSL.crypto.PKey: the private key
        """
        private_key_path = self.tls_private_key_file
        logger.info("Loading TLS key from %s", private_key_path)
        private_key_pem = self.read_file(private_key_path, "tls_private_key_path")
        return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
@@ -40,5 +40,5 @@ class UserDirectoryConfig(Config):
        # on your database to tell it to rebuild the user_directory search indexes.
        #
        #user_directory:
        #   search_all_users: false
        #  search_all_users: false
        """
@@ -27,20 +27,24 @@ class VoipConfig(Config):

    def default_config(self, **kwargs):
        return """\
        ## Turn ##
        ## TURN ##

        # The public URIs of the TURN server to give to clients
        #
        #turn_uris: []

        # The shared secret used to compute passwords for the TURN server
        #
        #turn_shared_secret: "YOUR_SHARED_SECRET"

        # The Username and password if the TURN server needs them and
        # does not use a token
        #
        #turn_username: "TURNSERVER_USERNAME"
        #turn_password: "TURNSERVER_PASSWORD"

        # How long generated TURN credentials last
        #
        turn_user_lifetime: "1h"

        # Whether guests should be allowed to use the TURN server.
@@ -48,5 +52,6 @@ class VoipConfig(Config):
        # However, it does introduce a slight security risk as it allows users to
        # connect to arbitrary endpoints without having first signed up for a
        # valid account (e.g. by passing a CAPTCHA).
        #
        turn_allow_guests: True
        """
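A turn_shared_secret implies the standard TURN REST credential scheme: the username carries an expiry timestamp and the password is an HMAC-SHA1 of that username under the shared secret. A sketch with placeholder values, with the lifetime mirroring turn_user_lifetime ("1h"):

import base64
import hashlib
import hmac
import time

def turn_credentials(user_id, shared_secret, lifetime_seconds=3600):
    expiry = int(time.time()) + lifetime_seconds
    username = "%d:%s" % (expiry, user_id)
    mac = hmac.new(shared_secret.encode("ascii"),
                   username.encode("utf-8"), hashlib.sha1)
    password = base64.b64encode(mac.digest()).decode("ascii")
    return username, password

print(turn_credentials("@alice:example.com", "YOUR_SHARED_SECRET"))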
@@ -1,4 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from zope.interface import implementer
@@ -43,9 +45,7 @@ class ServerContextFactory(ContextFactory):
            logger.exception("Failed to enable elliptic curve for TLS")
        context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
        context.use_certificate_chain_file(config.tls_certificate_file)

        if not config.no_tls:
            context.use_privatekey(config.tls_private_key)
        context.use_privatekey(config.tls_private_key)

        # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
        context.set_cipher_list(
@@ -107,9 +107,7 @@ class ClientTLSOptions(object):
        self._hostnameBytes = _idnaBytes(hostname)
        self._sendSNI = True

        ctx.set_info_callback(
            _tolerateErrors(self._identityVerifyingInfoCallback)
        )
        ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))

    def clientConnectionForTLS(self, tlsProtocol):
        context = self._ctx
@@ -130,10 +128,8 @@ class ClientTLSOptionsFactory(object):

    def __init__(self, config):
        # We don't use config options yet
        pass
        self._options = CertificateOptions(verify=False)

    def get_options(self, host):
        return ClientTLSOptions(
            host,
            CertificateOptions(verify=False).getContext()
        )
        # Use _makeContext so that we get a fresh OpenSSL CTX each time.
        return ClientTLSOptions(host, self._options._makeContext())
@@ -17,6 +17,7 @@
import logging
from collections import namedtuple

from six import raise_from
from six.moves import urllib

from signedjson.key import (
@@ -35,7 +36,12 @@ from unpaddedbase64 import decode_base64

from twisted.internet import defer

from synapse.api.errors import Codes, SynapseError
from synapse.api.errors import (
    Codes,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
    LoggingContext,
@@ -44,6 +50,7 @@ from synapse.util.logcontext import (
    run_in_background,
)
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)

@@ -367,13 +374,18 @@ class Keyring(object):
                    server_name_and_key_ids, perspective_name, perspective_keys
                )
                defer.returnValue(result)
            except KeyLookupError as e:
                logger.warning(
                    "Key lookup failed from %r: %s", perspective_name, e,
                )
            except Exception as e:
                logger.exception(
                    "Unable to get key from %r: %s %s",
                    perspective_name,
                    type(e).__name__, str(e),
                )
                defer.returnValue({})

            defer.returnValue({})

        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
@@ -421,21 +433,30 @@ class Keyring(object):
        # TODO(mark): Set the minimum_valid_until_ts to that needed by
        # the events being validated or the current time if validating
        # an incoming request.
        query_response = yield self.client.post_json(
            destination=perspective_name,
            path="/_matrix/key/v2/query",
            data={
                u"server_keys": {
                    server_name: {
                        key_id: {
                            u"minimum_valid_until_ts": 0
                        } for key_id in key_ids
        try:
            query_response = yield self.client.post_json(
                destination=perspective_name,
                path="/_matrix/key/v2/query",
                data={
                    u"server_keys": {
                        server_name: {
                            key_id: {
                                u"minimum_valid_until_ts": 0
                            } for key_id in key_ids
                        }
                        for server_name, key_ids in server_names_and_key_ids
                    }
                    for server_name, key_ids in server_names_and_key_ids
                }
            },
            long_retries=True,
        )
                },
                long_retries=True,
            )
        except (NotRetryingDestination, RequestSendFailed) as e:
            raise_from(
                KeyLookupError("Failed to connect to remote server"), e,
            )
        except HttpResponseException as e:
            raise_from(
                KeyLookupError("Remote server returned an error"), e,
            )

        keys = {}

@@ -502,11 +523,20 @@ class Keyring(object):
            if requested_key_id in keys:
                continue

            response = yield self.client.get_json(
                destination=server_name,
                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
                ignore_backoff=True,
            )
            try:
                response = yield self.client.get_json(
                    destination=server_name,
                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
                    ignore_backoff=True,
                )
            except (NotRetryingDestination, RequestSendFailed) as e:
                raise_from(
                    KeyLookupError("Failed to connect to remote server"), e,
                )
            except HttpResponseException as e:
                raise_from(
                    KeyLookupError("Remote server returned an error"), e,
                )

            if (u"signatures" not in response
                    or server_name not in response[u"signatures"]):
@@ -656,7 +686,7 @@ def _handle_key_deferred(verify_request):
    try:
        with PreserveLoggingContext():
            _, key_id, verify_key = yield verify_request.deferred
    except IOError as e:
    except (IOError, RequestSendFailed) as e:
        logger.warn(
            "Got IOError when downloading keys for %s: %s %s",
            server_name, type(e).__name__, str(e),

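The pattern above converts low-level transport failures into a single KeyLookupError while chaining the original exception with six's raise_from. A standalone sketch of the same idea (the fetch callable is a placeholder, not a Synapse API):

from six import raise_from

class KeyLookupError(Exception):
    pass

def fetch_key_with_translated_errors(fetch, server_name):
    try:
        return fetch(server_name)
    except IOError as e:
        # Chain the underlying failure so it remains visible in tracebacks.
        raise_from(KeyLookupError("Failed to connect to remote server"), e)
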
@@ -33,6 +33,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import (
    CodeMessageException,
    Codes,
    FederationDeniedError,
    HttpResponseException,
    SynapseError,
@@ -792,10 +793,25 @@ class FederationClient(FederationBase):
            defer.returnValue(content)
        except HttpResponseException as e:
            if e.code in [400, 404]:
                err = e.to_synapse_error()

                # If we receive an error response that isn't a generic error, we
                # assume that the remote understands the v2 invite API and this
                # is a legitimate error.
                if err.errcode != Codes.UNKNOWN:
                    raise err

                # Otherwise, we assume that the remote server doesn't understand
                # the v2 invite API.

                if room_version in (RoomVersions.V1, RoomVersions.V2):
                    pass  # We'll fall through
                else:
                    raise Exception("Remote server is too old")
                    raise SynapseError(
                        400,
                        "User's homeserver does not support this room version",
                        Codes.UNSUPPORTED_ROOM_VERSION,
                    )
            elif e.code == 403:
                raise e.to_synapse_error()
            else:

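The control flow above is a version-negotiation fallback: try the v2 invite API, and only fall back to v1 when the remote returns a generic error and the room version is old enough for v1 to express. A hedged sketch with hypothetical helper names (not Synapse's actual client API):

class RemoteError(Exception):
    def __init__(self, errcode):
        self.errcode = errcode

class UnsupportedRoomVersion(Exception):
    pass

def send_invite(remote, pdu, room_version):
    try:
        return remote.invite_v2(pdu, room_version)
    except RemoteError as e:
        # A specific error code means the remote understood v2: re-raise.
        if e.errcode != "M_UNKNOWN":
            raise
        # A generic error suggests the remote predates v2; only room
        # versions 1 and 2 can be expressed over the v1 API.
        if room_version not in ("1", "2"):
            raise UnsupportedRoomVersion(room_version)
        return remote.invite_v1(pdu)
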
@@ -25,9 +25,10 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
    FederationError,
    IncompatibleRoomVersionError,
    NotFoundError,
@@ -148,6 +149,22 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # Reject if PDU count > 50 or EDU count > 100
        if (len(transaction.pdus) > 50
                or (hasattr(transaction, "edus") and len(transaction.edus) > 100)):

            logger.info(
                "Transaction PDU or EDU count too large. Returning 400",
            )

            response = {}
            yield self.transaction_actions.set_response(
                origin,
                transaction,
                400, response
            )
            defer.returnValue((400, response))

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)
@@ -223,8 +240,9 @@ class FederationServer(FederationBase):
            f = failure.Failure()
            pdu_results[event_id] = {"error": str(e)}
            logger.error(
                "Failed to handle PDU %s: %s",
                event_id, f.getTraceback().rstrip(),
                "Failed to handle PDU %s",
                event_id,
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )

        yield concurrently_execute(
@@ -370,6 +388,13 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content, room_version):
        if room_version not in KNOWN_ROOM_VERSIONS:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(content, format_ver)

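The size check above caps the work a single federation transaction can enqueue. As a standalone guard it might look like this (the Transaction type is illustrative; the 50/100 limits mirror the patch):

MAX_PDUS_PER_TRANSACTION = 50
MAX_EDUS_PER_TRANSACTION = 100

def transaction_too_large(transaction):
    # Reject oversized transactions up front so one federation request
    # cannot enqueue unbounded work on the receiving server.
    if len(transaction.pdus) > MAX_PDUS_PER_TRANSACTION:
        return True
    edus = getattr(transaction, "edus", [])
    return len(edus) > MAX_EDUS_PER_TRANSACTION
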
@@ -33,7 +33,6 @@ from synapse.metrics import (
    event_processing_loop_counter,
    event_processing_loop_room_count,
    events_processed_counter,
    sent_edus_counter,
    sent_transactions_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -47,10 +46,24 @@ from .units import Edu, Transaction
logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
    "synapse_federation_client_sent_pdu_destinations:count", ""
    "synapse_federation_client_sent_pdu_destinations:count",
    "Number of PDUs queued for sending to one or more destinations",
)

sent_pdus_destination_dist_total = Counter(
    "synapse_federation_client_sent_pdu_destinations:total", ""
    "Total number of PDUs queued for sending across all destinations",
)

sent_edus_counter = Counter(
    "synapse_federation_client_sent_edus",
    "Total number of EDUs successfully sent",
)

sent_edus_by_type = Counter(
    "synapse_federation_client_sent_edus_by_type",
    "Number of EDUs successfully sent, by event type",
    ["type"],
)


@@ -360,8 +373,6 @@ class TransactionQueue(object):
            logger.info("Not sending EDU to ourselves")
            return

        sent_edus_counter.inc()

        if key:
            self.pending_edus_keyed_by_dest.setdefault(
                destination, {}
@@ -496,6 +507,9 @@ class TransactionQueue(object):
            )
            if success:
                sent_transactions_counter.inc()
                sent_edus_counter.inc(len(pending_edus))
                for edu in pending_edus:
                    sent_edus_by_type.labels(edu.edu_type).inc()
                # Remove the acknowledged device messages from the database
                # Only bother if we actually sent some device messages
                if device_message_edus:

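For reference, the labelled-counter pattern above follows standard prometheus_client usage; incrementing only after a successful transaction keeps the metric honest about delivery. A small self-contained sketch (metric name and values are examples):

from prometheus_client import Counter

sent_edus_by_type_example = Counter(
    "example_sent_edus_by_type",
    "Number of EDUs successfully sent, by event type",
    ["type"],
)

# Increment only once delivery has succeeded, so the metric reflects
# EDUs actually sent rather than merely queued.
for edu_type in ("m.receipt", "m.typing", "m.receipt"):
    sent_edus_by_type_example.labels(edu_type).inc()
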
@@ -43,9 +43,20 @@ logger = logging.getLogger(__name__)
class TransportLayerServer(JsonResource):
    """Handles incoming federation HTTP requests"""

    def __init__(self, hs):
    def __init__(self, hs, servlet_groups=None):
        """Initialize the TransportLayerServer

        Will by default register all servlets. For custom behaviour, pass in
        a list of servlet_groups to register.

        Args:
            hs (synapse.server.HomeServer): homeserver
            servlet_groups (list[str], optional): List of servlet groups to register.
                Defaults to ``DEFAULT_SERVLET_GROUPS``.
        """
        self.hs = hs
        self.clock = hs.get_clock()
        self.servlet_groups = servlet_groups

        super(TransportLayerServer, self).__init__(hs, canonical_json=False)

@@ -67,6 +78,7 @@ class TransportLayerServer(JsonResource):
            resource=self,
            ratelimiter=self.ratelimiter,
            authenticator=self.authenticator,
            servlet_groups=self.servlet_groups,
        )


@@ -724,7 +736,8 @@ class PublicRoomList(BaseFederationServlet):

        data = yield self.handler.get_local_public_room_list(
            limit, since_token,
            network_tuple=network_tuple
            network_tuple=network_tuple,
            from_federation=True,
        )
        defer.returnValue((200, data))

@@ -1308,10 +1321,12 @@ FEDERATION_SERVLET_CLASSES = (
    FederationClientKeysClaimServlet,
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    OpenIdUserInfo,
    FederationVersionServlet,
)

OPENID_SERVLET_CLASSES = (
    OpenIdUserInfo,
)

ROOM_LIST_CLASSES = (
    PublicRoomList,
@@ -1350,44 +1365,83 @@ GROUP_ATTESTATION_SERVLET_CLASSES = (
    FederationGroupsRenewAttestaionServlet,
)

DEFAULT_SERVLET_GROUPS = (
    "federation",
    "room_list",
    "group_server",
    "group_local",
    "group_attestation",
    "openid",
)

def register_servlets(hs, resource, authenticator, ratelimiter):
    for servletclass in FEDERATION_SERVLET_CLASSES:
        servletclass(
            handler=hs.get_federation_server(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)

    for servletclass in ROOM_LIST_CLASSES:
        servletclass(
            handler=hs.get_room_list_handler(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)
def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
    """Initialize and register servlet classes.

    for servletclass in GROUP_SERVER_SERVLET_CLASSES:
        servletclass(
            handler=hs.get_groups_server_handler(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)
    Will by default register all servlets. For custom behaviour, pass in
    a list of servlet_groups to register.

    for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
        servletclass(
            handler=hs.get_groups_local_handler(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)
    Args:
        hs (synapse.server.HomeServer): homeserver
        resource (TransportLayerServer): resource class to register to
        authenticator (Authenticator): authenticator to use
        ratelimiter (util.ratelimitutils.FederationRateLimiter): ratelimiter to use
        servlet_groups (list[str], optional): List of servlet groups to register.
            Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    if not servlet_groups:
        servlet_groups = DEFAULT_SERVLET_GROUPS

    for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
        servletclass(
            handler=hs.get_groups_attestation_renewer(),
            authenticator=authenticator,
            ratelimiter=ratelimiter,
            server_name=hs.hostname,
        ).register(resource)
    if "federation" in servlet_groups:
        for servletclass in FEDERATION_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_federation_server(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)

    if "openid" in servlet_groups:
        for servletclass in OPENID_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_federation_server(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)

    if "room_list" in servlet_groups:
        for servletclass in ROOM_LIST_CLASSES:
            servletclass(
                handler=hs.get_room_list_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)

    if "group_server" in servlet_groups:
        for servletclass in GROUP_SERVER_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_groups_server_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)

    if "group_local" in servlet_groups:
        for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_groups_local_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)

    if "group_attestation" in servlet_groups:
        for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_groups_attestation_renewer(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)

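A usage sketch of the new servlet_groups knob: a stripped-down federation resource could register only the groups it serves, while omitting the argument keeps the old register-everything behaviour (the wrapper function name is illustrative):

def register_federation_only(hs, resource, authenticator, ratelimiter):
    # Register just the core federation and room-list servlets; the group
    # and OpenID servlet groups are skipped on this resource.
    register_servlets(
        hs, resource, authenticator, ratelimiter,
        servlet_groups=["federation", "room_list"],
    )
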
@@ -42,7 +42,7 @@ from signedjson.sign import sign_json

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.api.errors import RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background
@@ -191,6 +191,11 @@ class GroupAttestionRenewer(object):
                yield self.store.update_attestation_renewal(
                    group_id, user_id, attestation
                )
            except RequestSendFailed as e:
                logger.warning(
                    "Failed to renew attestation of %r in %r: %s",
                    user_id, group_id, e,
                )
            except Exception:
                logger.exception("Error renewing attestation of %r in %r",
                                 user_id, group_id)

@@ -113,8 +113,7 @@ class GroupsServerHandler(object):
            room_id = room_entry["room_id"]
            joined_users = yield self.store.get_users_in_room(room_id)
            entry = yield self.room_list_handler.generate_room_entry(
                room_id, len(joined_users),
                with_alias=False, allow_private=True,
                room_id, len(joined_users), with_alias=False, allow_private=True,
            )
            entry = dict(entry)  # so we don't change what's cached
            entry.pop("room_id", None)
@@ -544,8 +543,7 @@ class GroupsServerHandler(object):

        joined_users = yield self.store.get_users_in_room(room_id)
        entry = yield self.room_list_handler.generate_room_entry(
            room_id, len(joined_users),
            with_alias=False, allow_private=True,
            room_id, len(joined_users), with_alias=False, allow_private=True,
        )

        if not entry:

@@ -17,7 +17,6 @@ from .admin import AdminHandler
from .directory import DirectoryHandler
from .federation import FederationHandler
from .identity import IdentityHandler
from .register import RegistrationHandler
from .search import SearchHandler


@@ -41,7 +40,6 @@ class Handlers(object):
    """

    def __init__(self, hs):
        self.registration_handler = RegistrationHandler(hs)
        self.federation_handler = FederationHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.admin_handler = AdminHandler(hs)

@@ -167,4 +167,4 @@ class BaseHandler(object):
                ratelimit=False,
            )
        except Exception as e:
            logger.warn("Error kicking guest user: %s" % (e,))
            logger.exception("Error kicking guest user: %s" % (e,))

@@ -56,6 +56,7 @@ class AcmeHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.reactor = hs.get_reactor()
        self._acme_domain = hs.config.acme_domain

    @defer.inlineCallbacks
    def start_listening(self):
@@ -123,15 +124,15 @@ class AcmeHandler(object):
    @defer.inlineCallbacks
    def provision_certificate(self):

        logger.warning("Reprovisioning %s", self.hs.hostname)
        logger.warning("Reprovisioning %s", self._acme_domain)

        try:
            yield self._issuer.issue_cert(self.hs.hostname)
            yield self._issuer.issue_cert(self._acme_domain)
        except Exception:
            logger.exception("Fail!")
            raise
        logger.warning("Reprovisioned %s, saving.", self.hs.hostname)
        cert_chain = self._store.certs[self.hs.hostname]
        logger.warning("Reprovisioned %s, saving.", self._acme_domain)
        cert_chain = self._store.certs[self._acme_domain]

        try:
            with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:

@@ -20,7 +20,11 @@ from twisted.internet import defer

from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.api.errors import FederationDeniedError
from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
)
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
@@ -504,13 +508,13 @@ class DeviceListEduUpdater(object):
            origin = get_domain_from_id(user_id)
            try:
                result = yield self.federation.query_user_devices(origin, user_id)
            except NotRetryingDestination:
            except (
                NotRetryingDestination, RequestSendFailed, HttpResponseException,
            ):
                # TODO: Remember that we are now out of sync and try again
                # later
                logger.warn(
                    "Failed to handle device list update for %s,"
                    " we're not retrying the remote",
                    user_id,
                    "Failed to handle device list update for %s", user_id,
                )
                # We abort on exceptions rather than accepting the update
                # as otherwise synapse will 'forget' that its device list

@@ -112,7 +112,9 @@ class DirectoryHandler(BaseHandler):
                403, "This user is not permitted to create this alias",
            )

        if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
        if not self.config.is_alias_creation_allowed(
            user_id, room_id, room_alias.to_string(),
        ):
            # Let's just return a generic message, as there may be all sorts of
            # reasons why we said no. TODO: Allow configurable error messages
            # per alias creation rule?
@@ -395,9 +397,9 @@ class DirectoryHandler(BaseHandler):
            room_id (str)
            visibility (str): "public" or "private"
        """
        if not self.spam_checker.user_may_publish_room(
            requester.user.to_string(), room_id
        ):
        user_id = requester.user.to_string()

        if not self.spam_checker.user_may_publish_room(user_id, room_id):
            raise AuthError(
                403,
                "This user is not permitted to publish rooms to the room list"
@@ -415,7 +417,24 @@ class DirectoryHandler(BaseHandler):

        yield self.auth.check_can_change_room_list(room_id, requester.user)

        yield self.store.set_room_is_public(room_id, visibility == "public")
        making_public = visibility == "public"
        if making_public:
            room_aliases = yield self.store.get_aliases_for_room(room_id)
            canonical_alias = yield self.store.get_canonical_alias_for_room(room_id)
            if canonical_alias:
                room_aliases.append(canonical_alias)

            if not self.config.is_publishing_room_allowed(
                user_id, room_id, room_aliases,
            ):
                # Let's just return a generic message, as there may be all sorts of
                # reasons why we said no. TODO: Allow configurable error messages
                # per alias creation rule?
                raise SynapseError(
                    403, "Not allowed to publish room",
                )

        yield self.store.set_room_is_public(room_id, making_public)

    @defer.inlineCallbacks
    def edit_published_appservice_room_list(self, appservice_id, network_id,

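The is_publishing_room_allowed call above consults config-driven rules against the user, the room and its aliases. A hypothetical illustration of such a first-match rule check — the rule representation here is an assumption for illustration, not the actual config schema:

def is_publishing_room_allowed(user_id, room_id, aliases, rules):
    """First matching rule wins; default to allow when nothing matches."""
    for matches, action in rules:
        if matches(user_id, room_id, aliases):
            return action == "allow"
    return True

# Example: deny one user, allow everyone else.
rules = [
    (lambda u, r, a: u == "@spammer:example.org", "deny"),
]
assert not is_publishing_room_allowed("@spammer:example.org", "!room:hs", [], rules)
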
@@ -19,7 +19,13 @@ from six import iteritems

from twisted.internet import defer

from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError
from synapse.api.errors import (
    Codes,
    NotFoundError,
    RoomKeysVersionError,
    StoreError,
    SynapseError,
)
from synapse.util.async_helpers import Linearizer

logger = logging.getLogger(__name__)
@@ -267,7 +273,7 @@ class E2eRoomKeysHandler(object):
            version(str): Optional; if None gives the most recent version
                otherwise a historical one.
        Raises:
            StoreError: code 404 if the requested backup version doesn't exist
            NotFoundError: if the requested backup version doesn't exist
        Returns:
            A deferred of an info dict that gives the info about the new version.

@@ -279,7 +285,13 @@ class E2eRoomKeysHandler(object):
        """

        with (yield self._upload_linearizer.queue(user_id)):
            res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
            try:
                res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Unknown backup version")
                else:
                    raise
            defer.returnValue(res)

    @defer.inlineCallbacks
@@ -290,8 +302,60 @@ class E2eRoomKeysHandler(object):
            user_id(str): the user whose current backup version we're deleting
            version(str): the version id of the backup being deleted
        Raises:
            StoreError: code 404 if this backup version doesn't exist
            NotFoundError: if this backup version doesn't exist
        """

        with (yield self._upload_linearizer.queue(user_id)):
            yield self.store.delete_e2e_room_keys_version(user_id, version)
            try:
                yield self.store.delete_e2e_room_keys_version(user_id, version)
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Unknown backup version")
                else:
                    raise

    @defer.inlineCallbacks
    def update_version(self, user_id, version, version_info):
        """Update the info about a given version of the user's backup

        Args:
            user_id(str): the user whose current backup version we're updating
            version(str): the backup version we're updating
            version_info(dict): the new information about the backup
        Raises:
            NotFoundError: if the requested backup version doesn't exist
        Returns:
            A deferred of an empty dict.
        """
        if "version" not in version_info:
            raise SynapseError(
                400,
                "Missing version in body",
                Codes.MISSING_PARAM
            )
        if version_info["version"] != version:
            raise SynapseError(
                400,
                "Version in body does not match",
                Codes.INVALID_PARAM
            )
        with (yield self._upload_linearizer.queue(user_id)):
            try:
                old_info = yield self.store.get_e2e_room_keys_version_info(
                    user_id, version
                )
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Unknown backup version")
                else:
                    raise
            if old_info["algorithm"] != version_info["algorithm"]:
                raise SynapseError(
                    400,
                    "Algorithm does not match",
                    Codes.INVALID_PARAM
                )

            yield self.store.update_e2e_room_keys_version(user_id, version, version_info)

            defer.returnValue({})

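A sketch of the payload shape update_version validates: the body must carry a version matching the one being updated and must not change the algorithm. Field values here are illustrative:

version = "1"
version_info = {
    "version": "1",  # must be present and match the version in the URL
    "algorithm": "m.megolm_backup.v1.curve25519-aes-sha2",  # must not change
    "auth_data": {"public_key": "base64+public+key"},
}

assert "version" in version_info           # else 400 M_MISSING_PARAM
assert version_info["version"] == version  # else 400 M_INVALID_PARAM
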
@@ -770,10 +770,26 @@ class FederationHandler(BaseHandler):
            set(auth_events.keys()) | set(state_events.keys())
        )

        # We now have a chunk of events plus associated state and auth chain to
        # persist. We do the persistence in two steps:
        #   1. Auth events and state get persisted as outliers, plus the
        #      backward extremities get persisted (as non-outliers).
        #   2. The rest of the events in the chunk get persisted one by one, as
        #      each one depends on the previous event for its state.
        #
        # The important thing is that events in the chunk get persisted as
        # non-outliers, including when those events are also in the state or
        # auth chain. Caution must therefore be taken to ensure that they are
        # not accidentally marked as outliers.

        # Step 1a: persist auth events that *don't* appear in the chunk
        ev_infos = []
        for a in auth_events.values():
            if a.event_id in seen_events:
            # We only want to persist auth events as outliers that we haven't
            # seen and aren't about to persist as part of the backfilled chunk.
            if a.event_id in seen_events or a.event_id in event_map:
                continue

            a.internal_metadata.outlier = True
            ev_infos.append({
                "event": a,
@@ -785,14 +801,21 @@ class FederationHandler(BaseHandler):
                }
            })

        # Step 1b: persist the events in the chunk we fetched state for (i.e.
        # the backwards extremities) as non-outliers.
        for e_id in events_to_state:
            # For paranoia we ensure that these events are marked as
            # non-outliers
            ev = event_map[e_id]
            assert(not ev.internal_metadata.is_outlier())

            ev_infos.append({
                "event": event_map[e_id],
                "event": ev,
                "state": events_to_state[e_id],
                "auth_events": {
                    (auth_events[a_id].type, auth_events[a_id].state_key):
                        auth_events[a_id]
                    for a_id in event_map[e_id].auth_event_ids()
                    for a_id in ev.auth_event_ids()
                    if a_id in auth_events
                }
            })
@@ -802,12 +825,17 @@ class FederationHandler(BaseHandler):
            backfilled=True,
        )

        # Step 2: Persist the rest of the events in the chunk one by one
        events.sort(key=lambda e: e.depth)

        for event in events:
            if event in events_to_state:
                continue

            # For paranoia we ensure that these events are marked as
            # non-outliers
            assert(not event.internal_metadata.is_outlier())

            # We store these one at a time since each event depends on the
            # previous to work out the state.
            # TODO: We can probably do something more clever here.

@@ -20,7 +20,7 @@ from six import iteritems

from twisted.internet import defer

from synapse.api.errors import HttpResponseException, SynapseError
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.types import get_domain_from_id

logger = logging.getLogger(__name__)
@@ -46,13 +46,19 @@ def _create_rerouter(func_name):
            # when the remote end responds with things like 403 Not
            # In Group, we can communicate that to the client instead
            # of a 500.
            def h(failure):
            def http_response_errback(failure):
                failure.trap(HttpResponseException)
                e = failure.value
                if e.code == 403:
                    raise e.to_synapse_error()
                return failure
            d.addErrback(h)

            def request_failed_errback(failure):
                failure.trap(RequestSendFailed)
                raise SynapseError(502, "Failed to contact group server")

            d.addErrback(http_response_errback)
            d.addErrback(request_failed_errback)
            return d
        return f

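The two errbacks above rely on failure.trap, which re-raises the failure unless it matches the given type, so each handler deals only with the errors it understands. A minimal Twisted sketch of the same chaining:

from twisted.internet import defer

def handle_value_error(failure):
    failure.trap(ValueError)  # re-raises anything that is not a ValueError
    return "recovered"

d = defer.Deferred()
d.addErrback(handle_value_error)
d.errback(ValueError("boom"))
# The deferred now carries the success result "recovered"; a TypeError
# would instead have flowed past this errback to the next one.
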
@@ -136,7 +136,11 @@ class PaginationHandler(object):
            logger.info("[purge] complete")
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
        except Exception:
            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
            f = Failure()
            logger.error(
                "[purge] failed",
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
        finally:
            self._purges_in_progress_by_room.discard(room_id)
@@ -254,7 +258,7 @@ class PaginationHandler(object):
        })

        state = None
        if event_filter and event_filter.lazy_load_members():
        if event_filter and event_filter.lazy_load_members() and len(events) > 0:
            # TODO: remove redundant members

            # FIXME: we also care about invite targets etc.

@@ -16,8 +16,8 @@ import logging

from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util import logcontext

from ._base import BaseHandler

@@ -59,7 +59,9 @@ class ReceiptsHandler(BaseHandler):
        if is_new:
            # fire off a process in the background to send the receipt to
            # remote servers
            self._push_remotes([receipt])
            run_as_background_process(
                'push_receipts_to_remotes', self._push_remotes, receipt
            )

    @defer.inlineCallbacks
    def _received_remote_receipt(self, origin, content):
@@ -125,44 +127,42 @@ class ReceiptsHandler(BaseHandler):

        defer.returnValue(True)

    @logcontext.preserve_fn  # caller should not yield on this
    @defer.inlineCallbacks
    def _push_remotes(self, receipts):
        """Given a list of receipts, works out which remote servers should be
    def _push_remotes(self, receipt):
        """Given a receipt, works out which remote servers should be
        poked and pokes them.
        """
        try:
            # TODO: Some of this stuff should be coallesced.
            for receipt in receipts:
                room_id = receipt["room_id"]
                receipt_type = receipt["receipt_type"]
                user_id = receipt["user_id"]
                event_ids = receipt["event_ids"]
                data = receipt["data"]
            # TODO: optimise this to move some of the work to the workers.
            room_id = receipt["room_id"]
            receipt_type = receipt["receipt_type"]
            user_id = receipt["user_id"]
            event_ids = receipt["event_ids"]
            data = receipt["data"]

                users = yield self.state.get_current_user_in_room(room_id)
                remotedomains = set(get_domain_from_id(u) for u in users)
                remotedomains = remotedomains.copy()
                remotedomains.discard(self.server_name)
            users = yield self.state.get_current_user_in_room(room_id)
            remotedomains = set(get_domain_from_id(u) for u in users)
            remotedomains = remotedomains.copy()
            remotedomains.discard(self.server_name)

                logger.debug("Sending receipt to: %r", remotedomains)
            logger.debug("Sending receipt to: %r", remotedomains)

                for domain in remotedomains:
                    self.federation.send_edu(
                        destination=domain,
                        edu_type="m.receipt",
                        content={
                            room_id: {
                                receipt_type: {
                                    user_id: {
                                        "event_ids": event_ids,
                                        "data": data,
                                    }
            for domain in remotedomains:
                self.federation.send_edu(
                    destination=domain,
                    edu_type="m.receipt",
                    content={
                        room_id: {
                            receipt_type: {
                                user_id: {
                                    "event_ids": event_ids,
                                    "data": data,
                                }
                                },
                            }
                        },
                        key=(room_id, receipt_type, user_id),
                    )
                            },
                        }
                    },
                    key=(room_id, receipt_type, user_id),
                )
        except Exception:
            logger.exception("Error pushing receipts to remote servers")

@@ -19,6 +19,7 @@ import logging
from twisted.internet import defer

from synapse import types
from synapse.api.constants import LoginType
from synapse.api.errors import (
    AuthError,
    Codes,
@@ -26,7 +27,14 @@ from synapse.api.errors import (
    RegistrationError,
    SynapseError,
)
from synapse.config.server import is_threepid_reserved
from synapse.http.client import CaptchaServerHttpClient
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
    ReplicationPostRegisterActionsServlet,
    ReplicationRegisterServlet,
)
from synapse.types import RoomAlias, RoomID, UserID, create_requester
from synapse.util.async_helpers import Linearizer
from synapse.util.threepids import check_3pid_allowed
@@ -51,6 +59,7 @@ class RegistrationHandler(BaseHandler):
        self.profile_handler = hs.get_profile_handler()
        self.user_directory_handler = hs.get_user_directory_handler()
        self.captcha_client = CaptchaServerHttpClient(hs)
        self.identity_handler = self.hs.get_handlers().identity_handler

        self._next_generated_user_id = None

@@ -61,6 +70,18 @@ class RegistrationHandler(BaseHandler):
        )
        self._server_notices_mxid = hs.config.server_notices_mxid

        if hs.config.worker_app:
            self._register_client = ReplicationRegisterServlet.make_client(hs)
            self._register_device_client = (
                RegisterDeviceReplicationServlet.make_client(hs)
            )
            self._post_registration_client = (
                ReplicationPostRegisterActionsServlet.make_client(hs)
            )
        else:
            self.device_handler = hs.get_device_handler()
            self.pusher_pool = hs.get_pusherpool()

    @defer.inlineCallbacks
    def check_username(self, localpart, guest_access_token=None,
                       assigned_user_id=None):
@@ -155,7 +176,7 @@ class RegistrationHandler(BaseHandler):
        yield self.auth.check_auth_blocking(threepid=threepid)
        password_hash = None
        if password:
            password_hash = yield self.auth_handler().hash(password)
            password_hash = yield self._auth_handler.hash(password)

        if localpart:
            yield self.check_username(localpart, guest_access_token=guest_access_token)
@@ -185,7 +206,7 @@ class RegistrationHandler(BaseHandler):
            token = None
            if generate_token:
                token = self.macaroon_gen.generate_access_token(user_id)
            yield self.store.register(
            yield self._register_with_store(
                user_id=user_id,
                token=token,
                password_hash=password_hash,
@@ -217,7 +238,7 @@ class RegistrationHandler(BaseHandler):
                if default_display_name is None:
                    default_display_name = localpart
                try:
                    yield self.store.register(
                    yield self._register_with_store(
                        user_id=user_id,
                        token=token,
                        password_hash=password_hash,
@@ -316,7 +337,7 @@ class RegistrationHandler(BaseHandler):
            user_id, allowed_appservice=service
        )

        yield self.store.register(
        yield self._register_with_store(
            user_id=user_id,
            password_hash="",
            appservice_id=service_id,
@@ -359,8 +380,7 @@ class RegistrationHandler(BaseHandler):
            logger.info("validating threepidcred sid %s on id server %s",
                        c['sid'], c['idServer'])
            try:
                identity_handler = self.hs.get_handlers().identity_handler
                threepid = yield identity_handler.threepid_from_creds(c)
                threepid = yield self.identity_handler.threepid_from_creds(c)
            except Exception:
                logger.exception("Couldn't validate 3pid")
                raise RegistrationError(400, "Couldn't validate 3pid")
@@ -384,9 +404,8 @@ class RegistrationHandler(BaseHandler):

        # Now we have a matrix ID, bind it to the threepids we were given
        for c in threepidCreds:
            identity_handler = self.hs.get_handlers().identity_handler
            # XXX: This should be a deferred list, shouldn't it?
            yield identity_handler.bind_threepid(c, user_id)
            yield self.identity_handler.bind_threepid(c, user_id)

    def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
        # don't allow people to register the server notices mxid
@@ -441,7 +460,7 @@ class RegistrationHandler(BaseHandler):
        lines = response.split('\n')
        json = {
            "valid": lines[0] == 'true',
            "error_url": "http://www.google.com/recaptcha/api/challenge?" +
            "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
                         "error=%s" % lines[1]
        }
        defer.returnValue(json)
@@ -452,7 +471,7 @@ class RegistrationHandler(BaseHandler):
        Used only by c/s api v1
        """
        data = yield self.captcha_client.post_urlencoded_get_raw(
            "http://www.google.com:80/recaptcha/api/verify",
            "http://www.recaptcha.net:80/recaptcha/api/verify",
            args={
                'privatekey': private_key,
                'remoteip': ip_addr,
@@ -494,7 +513,7 @@ class RegistrationHandler(BaseHandler):
            token = self.macaroon_gen.generate_access_token(user_id)

            if need_register:
                yield self.store.register(
                yield self._register_with_store(
                    user_id=user_id,
                    token=token,
                    password_hash=password_hash,
@@ -512,9 +531,6 @@ class RegistrationHandler(BaseHandler):

        defer.returnValue((user_id, token))

    def auth_handler(self):
        return self.hs.get_auth_handler()

    @defer.inlineCallbacks
    def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
        """Get a guest access token for a 3PID, creating a guest account if

@@ -573,3 +589,275 @@ class RegistrationHandler(BaseHandler):
            action="join",
            ratelimit=False,
        )

    def _register_with_store(self, user_id, token=None, password_hash=None,
                             was_guest=False, make_guest=False, appservice_id=None,
                             create_profile_with_displayname=None, admin=False,
                             user_type=None):
        """Register user in the datastore.

        Args:
            user_id (str): The desired user ID to register.
            token (str): The desired access token to use for this user. If this
                is not None, the given access token is associated with the user
                id.
            password_hash (str|None): Optional. The password hash for this user.
            was_guest (bool): Optional. Whether this is a guest account being
                upgraded to a non-guest account.
            make_guest (boolean): True if the new user should be guest,
                false to add a regular user account.
            appservice_id (str|None): The ID of the appservice registering the user.
            create_profile_with_displayname (unicode|None): Optionally create a
                profile for the user, setting their displayname to the given value
            admin (boolean): is an admin user?
            user_type (str|None): type of user. One of the values from
                api.constants.UserTypes, or None for a normal user.

        Returns:
            Deferred
        """
        if self.hs.config.worker_app:
            return self._register_client(
                user_id=user_id,
                token=token,
                password_hash=password_hash,
                was_guest=was_guest,
                make_guest=make_guest,
                appservice_id=appservice_id,
                create_profile_with_displayname=create_profile_with_displayname,
                admin=admin,
                user_type=user_type,
            )
        else:
            return self.store.register(
                user_id=user_id,
                token=token,
                password_hash=password_hash,
                was_guest=was_guest,
                make_guest=make_guest,
                appservice_id=appservice_id,
                create_profile_with_displayname=create_profile_with_displayname,
                admin=admin,
                user_type=user_type,
            )

    @defer.inlineCallbacks
    def register_device(self, user_id, device_id, initial_display_name,
                        is_guest=False):
        """Register a device for a user and generate an access token.

        Args:
            user_id (str): full canonical @user:id
            device_id (str|None): The device ID to check, or None to generate
                a new one.
            initial_display_name (str|None): An optional display name for the
                device.
            is_guest (bool): Whether this is a guest account

        Returns:
            defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
        """

        if self.hs.config.worker_app:
            r = yield self._register_device_client(
                user_id=user_id,
                device_id=device_id,
                initial_display_name=initial_display_name,
                is_guest=is_guest,
            )
            defer.returnValue((r["device_id"], r["access_token"]))
        else:
            device_id = yield self.device_handler.check_device_registered(
                user_id, device_id, initial_display_name
            )
            if is_guest:
                access_token = self.macaroon_gen.generate_access_token(
                    user_id, ["guest = true"]
                )
            else:
                access_token = yield self._auth_handler.get_access_token_for_user_id(
                    user_id, device_id=device_id,
                )

            defer.returnValue((device_id, access_token))

    @defer.inlineCallbacks
    def post_registration_actions(self, user_id, auth_result, access_token,
                                  bind_email, bind_msisdn):
        """A user has completed registration

        Args:
            user_id (str): The user ID that consented
            auth_result (dict): The authenticated credentials of the newly
                registered user.
            access_token (str|None): The access token of the newly logged in
                device, or None if `inhibit_login` enabled.
            bind_email (bool): Whether to bind the email with the identity
                server
            bind_msisdn (bool): Whether to bind the msisdn with the identity
                server
        """
        if self.hs.config.worker_app:
            yield self._post_registration_client(
                user_id=user_id,
                auth_result=auth_result,
                access_token=access_token,
                bind_email=bind_email,
                bind_msisdn=bind_msisdn,
            )
            return

        if auth_result and LoginType.EMAIL_IDENTITY in auth_result:
            threepid = auth_result[LoginType.EMAIL_IDENTITY]
            # Necessary due to auth checks prior to the threepid being
            # written to the db
            if is_threepid_reserved(
                self.hs.config.mau_limits_reserved_threepids, threepid
            ):
                yield self.store.upsert_monthly_active_user(user_id)

            yield self._register_email_threepid(
                user_id, threepid, access_token,
                bind_email,
            )

        if auth_result and LoginType.MSISDN in auth_result:
            threepid = auth_result[LoginType.MSISDN]
            yield self._register_msisdn_threepid(
                user_id, threepid, bind_msisdn,
            )

        if auth_result and LoginType.TERMS in auth_result:
            yield self._on_user_consented(
                user_id, self.hs.config.user_consent_version,
            )

    @defer.inlineCallbacks
    def _on_user_consented(self, user_id, consent_version):
        """A user consented to the terms on registration

        Args:
            user_id (str): The user ID that consented
            consent_version (str): version of the policy the user has
                consented to.
        """
        logger.info("%s has consented to the privacy policy", user_id)
        yield self.store.user_set_consent_version(
            user_id, consent_version,
        )
        yield self.post_consent_actions(user_id)

    @defer.inlineCallbacks
    def _register_email_threepid(self, user_id, threepid, token, bind_email):
        """Add an email address as a 3pid identifier

        Also adds an email pusher for the email address, if configured in the
        HS config

        Also optionally binds emails to the given user_id on the identity server

        Must be called on master.

        Args:
            user_id (str): id of user
            threepid (object): m.login.email.identity auth response
            token (str|None): access_token for the user, or None if not logged
                in.
            bind_email (bool): true if the client requested the email to be
                bound at the identity server
        Returns:
            defer.Deferred:
        """
        reqd = ('medium', 'address', 'validated_at')
        if any(x not in threepid for x in reqd):
            # This will only happen if the ID server returns a malformed response
            logger.info("Can't add incomplete 3pid")
            return

        yield self._auth_handler.add_threepid(
            user_id,
            threepid['medium'],
            threepid['address'],
            threepid['validated_at'],
        )

        # And we add an email pusher for them by default, but only
        # if email notifications are enabled (so people don't start
        # getting mail spam where they weren't before if email
        # notifs are set up on a home server)
        if (self.hs.config.email_enable_notifs and
                self.hs.config.email_notif_for_new_users
                and token):
            # Pull the ID of the access token back out of the db
            # It would really make more sense for this to be passed
            # up when the access token is saved, but that's quite an
            # invasive change I'd rather do separately.
            user_tuple = yield self.store.get_user_by_access_token(
                token
            )
            token_id = user_tuple["token_id"]

            yield self.pusher_pool.add_pusher(
                user_id=user_id,
                access_token=token_id,
                kind="email",
                app_id="m.email",
                app_display_name="Email Notifications",
                device_display_name=threepid["address"],
                pushkey=threepid["address"],
                lang=None,  # We don't know a user's language here
                data={},
            )

        if bind_email:
            logger.info("bind_email specified: binding")
            logger.debug("Binding emails %s to %s" % (
                threepid, user_id
            ))
            yield self.identity_handler.bind_threepid(
                threepid['threepid_creds'], user_id
            )
        else:
            logger.info("bind_email not specified: not binding email")

    @defer.inlineCallbacks
    def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn):
        """Add a phone number as a 3pid identifier

        Also optionally binds msisdn to the given user_id on the identity server

        Must be called on master.

        Args:
            user_id (str): id of user
            threepid (object): m.login.msisdn auth response
            bind_msisdn (bool): true if the client requested the msisdn to be
                bound at the identity server
        Returns:
            defer.Deferred:
        """
        try:
            assert_params_in_dict(threepid, ['medium', 'address', 'validated_at'])
        except SynapseError as ex:
            if ex.errcode == Codes.MISSING_PARAM:
                # This will only happen if the ID server returns a malformed response
                logger.info("Can't add incomplete 3pid")
                defer.returnValue(None)
            raise

        yield self._auth_handler.add_threepid(
            user_id,
            threepid['medium'],
            threepid['address'],
            threepid['validated_at'],
        )

        if bind_msisdn:
            logger.info("bind_msisdn specified: binding")
            logger.debug("Binding msisdn %s to %s", threepid, user_id)
            yield self.identity_handler.bind_threepid(
                threepid['threepid_creds'], user_id
            )
        else:
            logger.info("bind_msisdn not specified: not binding msisdn")

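The worker/master split above repeats one pattern: on a worker, writes are proxied to the master over replication HTTP clients created by make_client; on the master they hit the handlers and datastore directly. Schematically (a sketch of the dispatch, not the actual module):

def register_with_store(hs, store, register_client, **kwargs):
    if hs.config.worker_app:
        # Worker process: forward the write to the master over the
        # replication HTTP client created by make_client().
        return register_client(**kwargs)
    # Master process: perform the datastore write directly.
    return store.register(**kwargs)
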
@@ -263,6 +263,16 @@ class RoomCreationHandler(BaseHandler):
            }
        }

        # Check if old room was non-federatable

        # Get old room's create event
        old_room_create_event = yield self.store.get_create_event_for_room(old_room_id)

        # Check if the create event specified a non-federatable room
        if not old_room_create_event.content.get("m.federate", True):
            # If so, mark the new room as non-federatable as well
            creation_content["m.federate"] = False

        initial_state = dict()

        # Replicate relevant room events
@@ -274,6 +284,7 @@ class RoomCreationHandler(BaseHandler):
            (EventTypes.GuestAccess, ""),
            (EventTypes.RoomAvatar, ""),
            (EventTypes.Encryption, ""),
            (EventTypes.ServerACL, ""),
        )

        old_room_state_ids = yield self.store.get_filtered_current_state_ids(
@@ -300,6 +311,28 @@ class RoomCreationHandler(BaseHandler):
            creation_content=creation_content,
        )

        # Transfer membership events
        old_room_member_state_ids = yield self.store.get_filtered_current_state_ids(
            old_room_id, StateFilter.from_types([(EventTypes.Member, None)]),
        )

        # map from event_id to BaseEvent
        old_room_member_state_events = yield self.store.get_events(
            old_room_member_state_ids.values(),
        )
        for k, old_event in iteritems(old_room_member_state_events):
            # Only transfer ban events
            if ("membership" in old_event.content and
                    old_event.content["membership"] == "ban"):
                yield self.room_member_handler.update_membership(
                    requester,
                    UserID.from_string(old_event['state_key']),
                    new_room_id,
                    "ban",
                    ratelimit=False,
                    content=old_event.content,
                )

        # XXX invites/joins
        # XXX 3pid invites

@@ -50,16 +50,17 @@ class RoomListHandler(BaseHandler):

    def get_local_public_room_list(self, limit=None, since_token=None,
                                   search_filter=None,
                                   network_tuple=EMPTY_THIRD_PARTY_ID,):
                                   network_tuple=EMPTY_THIRD_PARTY_ID,
                                   from_federation=False):
        """Generate a local public room list.

        There are multiple different lists: the main one plus one per third
        party network. A client can ask for a specific list or to return all.

        Args:
            limit (int)
            since_token (str)
            search_filter (dict)
            limit (int|None)
            since_token (str|None)
            search_filter (dict|None)
            network_tuple (ThirdPartyInstanceID): Which public list to use.
                This can be (None, None) to indicate the main list, or a particular
                appservice and network id to use an appservice specific one.
@@ -87,14 +88,30 @@ class RoomListHandler(BaseHandler):
        return self.response_cache.wrap(
            key,
            self._get_public_room_list,
            limit, since_token, network_tuple=network_tuple,
            limit, since_token,
            network_tuple=network_tuple, from_federation=from_federation,
        )

    @defer.inlineCallbacks
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None,
                              network_tuple=EMPTY_THIRD_PARTY_ID,
                              from_federation=False,
                              timeout=None,):
        """Generate a public room list.
        Args:
            limit (int|None): Maximum amount of rooms to return.
            since_token (str|None)
            search_filter (dict|None): Dictionary to filter rooms by.
            network_tuple (ThirdPartyInstanceID): Which public list to use.
                This can be (None, None) to indicate the main list, or a particular
                appservice and network id to use an appservice specific one.
                Setting to None returns all public rooms across all lists.
            from_federation (bool): Whether this request originated from a
                federating server or a client. Used for room filtering.
            timeout (int|None): Amount of seconds to wait for a response before
                timing out.
        """
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
@@ -217,7 +234,8 @@ class RoomListHandler(BaseHandler):
        yield concurrently_execute(
            lambda r: self._append_room_entry_to_chunk(
                r, rooms_to_num_joined[r],
                chunk, limit, search_filter
                chunk, limit, search_filter,
                from_federation=from_federation,
            ),
            batch, 5,
        )
@@ -288,23 +306,51 @@ class RoomListHandler(BaseHandler):

    @defer.inlineCallbacks
    def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk, limit,
                                    search_filter):
                                    search_filter, from_federation=False):
        """Generate the entry for a room in the public room list and append it
        to the `chunk` if it matches the search filter

        Args:
            room_id (str): The ID of the room.
            num_joined_users (int): The number of joined users in the room.
            chunk (list)
            limit (int|None): Maximum amount of rooms to display. Function will
                return if length of chunk is greater than limit + 1.
            search_filter (dict|None)
            from_federation (bool): Whether this request originated from a
                federating server or a client. Used for room filtering.
        """
        if limit and len(chunk) > limit + 1:
            # We've already got enough, so let's just drop it.
            return

        result = yield self.generate_room_entry(room_id, num_joined_users)
        if not result:
            return

        if result and _matches_room_entry(result, search_filter):
        if from_federation and not result.get("m.federate", True):
            # This is a room that other servers cannot join. Do not show them
            # this room.
            return

        if _matches_room_entry(result, search_filter):
            chunk.append(result)

    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def generate_room_entry(self, room_id, num_joined_users, cache_context,
                            with_alias=True, allow_private=False):
        """Returns the entry for a room

        Args:
            room_id (str): The room's ID.
            num_joined_users (int): Number of users in the room.
            cache_context: Information for cached responses.
            with_alias (bool): Whether to return the room's aliases in the result.
            allow_private (bool): Whether invite-only rooms should be shown.

        Returns:
            Deferred[dict|None]: Returns a room entry as a dictionary, or None if this
                room was determined not to be shown publicly.
        """
        result = {
            "room_id": room_id,
@@ -318,6 +364,7 @@ class RoomListHandler(BaseHandler):
        event_map = yield self.store.get_events([
            event_id for key, event_id in iteritems(current_state_ids)
            if key[0] in (
                EventTypes.Create,
                EventTypes.JoinRules,
                EventTypes.Name,
                EventTypes.Topic,
@@ -334,12 +381,17 @@ class RoomListHandler(BaseHandler):
        }

        # Double check that this is actually a public room.

        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        # Return whether this room is open to federation users or not
        create_event = current_state.get((EventTypes.Create, ""))
        result["m.federate"] = create_event.content.get("m.federate", True)

        if with_alias:
            aliases = yield self.store.get_aliases_for_room(
                room_id, on_invalidate=cache_context.invalidate

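The from_federation plumbing above culminates in one rule: rooms whose create event sets m.federate to False are hidden from lists served over federation but stay visible to local clients. Distilled into a sketch of the predicate (not the handler itself):

def room_visible_to_requester(entry, from_federation):
    # entry is a room entry dict as built by generate_room_entry above.
    if from_federation and not entry.get("m.federate", True):
        return False  # other servers cannot join this room; hide it
    return True
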
@@ -61,7 +61,7 @@ class RoomMemberHandler(object):

        self.federation_handler = hs.get_handlers().federation_handler
        self.directory_handler = hs.get_handlers().directory_handler
-       self.registration_handler = hs.get_handlers().registration_handler
+       self.registration_handler = hs.get_registration_handler()
        self.profile_handler = hs.get_profile_handler()
        self.event_creation_handler = hs.get_event_creation_handler()

@@ -130,7 +130,7 @@ class UserDirectoryHandler(object):
        # Support users are for diagnostics and should not appear in the user directory.
        if not is_support:
            yield self.store.update_profile_in_user_dir(
-               user_id, profile.display_name, profile.avatar_url, None,
+               user_id, profile.display_name, profile.avatar_url, None
            )

    @defer.inlineCallbacks
@@ -166,8 +166,9 @@ class UserDirectoryHandler(object):
            self.pos = deltas[-1]["stream_id"]

            # Expose current event processing position to prometheus
-           synapse.metrics.event_processing_positions.labels(
-               "user_dir").set(self.pos)
+           synapse.metrics.event_processing_positions.labels("user_dir").set(
+               self.pos
+           )

            yield self.store.update_user_directory_stream_pos(self.pos)

@@ -191,21 +192,25 @@ class UserDirectoryHandler(object):
                logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
                yield self._handle_initial_room(room_id)
                num_processed_rooms += 1
-               yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+               yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)

        logger.info("Processed all rooms.")

        if self.search_all_users:
            num_processed_users = 0
            user_ids = yield self.store.get_all_local_users()
-           logger.info("Doing initial update of user directory. %d users", len(user_ids))
+           logger.info(
+               "Doing initial update of user directory. %d users", len(user_ids)
+           )
            for user_id in user_ids:
                # We add profiles for all users even if they don't match the
                # include pattern, just in case we want to change it in future
-               logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
+               logger.info(
+                   "Handling user %d/%d", num_processed_users + 1, len(user_ids)
+               )
                yield self._handle_local_user(user_id)
                num_processed_users += 1
-               yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
+               yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)

        logger.info("Processed all users")

@@ -224,24 +229,24 @@ class UserDirectoryHandler(object):
        if not is_in_room:
            return

-       is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)
+       is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
+           room_id
+       )

        users_with_profile = yield self.state.get_current_user_in_room(room_id)
        user_ids = set(users_with_profile)
        unhandled_users = user_ids - self.initially_handled_users

        yield self.store.add_profiles_to_user_dir(
-           room_id, {
-               user_id: users_with_profile[user_id] for user_id in unhandled_users
-           }
+           room_id,
+           {user_id: users_with_profile[user_id] for user_id in unhandled_users},
        )

        self.initially_handled_users |= unhandled_users

        if is_public:
            yield self.store.add_users_to_public_room(
-               room_id,
-               user_ids=user_ids - self.initially_handled_users_in_public
+               room_id, user_ids=user_ids - self.initially_handled_users_in_public
            )
            self.initially_handled_users_in_public |= user_ids

@@ -253,7 +258,7 @@ class UserDirectoryHandler(object):
        count = 0
        for user_id in user_ids:
            if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-               yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+               yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)

            if not self.is_mine_id(user_id):
                count += 1
@@ -268,7 +273,7 @@ class UserDirectoryHandler(object):
                    continue

                if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
-                   yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
+                   yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
                count += 1

                user_set = (user_id, other_user_id)
@@ -290,25 +295,23 @@ class UserDirectoryHandler(object):

                if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
                    yield self.store.add_users_who_share_room(
-                       room_id, not is_public, to_insert,
+                       room_id, not is_public, to_insert
                    )
                    to_insert.clear()

                if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
                    yield self.store.update_users_who_share_room(
-                       room_id, not is_public, to_update,
+                       room_id, not is_public, to_update
                    )
                    to_update.clear()

        if to_insert:
-           yield self.store.add_users_who_share_room(
-               room_id, not is_public, to_insert,
-           )
+           yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
            to_insert.clear()

        if to_update:
            yield self.store.update_users_who_share_room(
-               room_id, not is_public, to_update,
+               room_id, not is_public, to_update
            )
            to_update.clear()

@@ -329,11 +332,12 @@ class UserDirectoryHandler(object):
            # may have become public or not and add/remove the users in said room
            if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
                yield self._handle_room_publicity_change(
-                   room_id, prev_event_id, event_id, typ,
+                   room_id, prev_event_id, event_id, typ
                )
            elif typ == EventTypes.Member:
                change = yield self._get_key_change(
-                   prev_event_id, event_id,
+                   prev_event_id,
+                   event_id,
                    key_name="membership",
                    public_value=Membership.JOIN,
                )
@@ -342,14 +346,16 @@ class UserDirectoryHandler(object):
                    # Need to check if the server left the room entirely, if so
                    # we might need to remove all the users in that room
                    is_in_room = yield self.store.is_host_joined(
-                       room_id, self.server_name,
+                       room_id, self.server_name
                    )
                    if not is_in_room:
                        logger.info("Server left room: %r", room_id)
                        # Fetch all the users that we marked as being in user
                        # directory due to being in the room and then check if
                        # need to remove those users or not
-                       user_ids = yield self.store.get_users_in_dir_due_to_room(room_id)
+                       user_ids = yield self.store.get_users_in_dir_due_to_room(
+                           room_id
+                       )
                        for user_id in user_ids:
                            yield self._handle_remove_user(room_id, user_id)
                        return
@@ -361,7 +367,7 @@ class UserDirectoryHandler(object):
                if change is None:
                    # Handle any profile changes
                    yield self._handle_profile_change(
-                       state_key, room_id, prev_event_id, event_id,
+                       state_key, room_id, prev_event_id, event_id
                    )
                    continue

@@ -393,13 +399,15 @@ class UserDirectoryHandler(object):

        if typ == EventTypes.RoomHistoryVisibility:
            change = yield self._get_key_change(
-               prev_event_id, event_id,
+               prev_event_id,
+               event_id,
                key_name="history_visibility",
                public_value="world_readable",
            )
        elif typ == EventTypes.JoinRules:
            change = yield self._get_key_change(
-               prev_event_id, event_id,
+               prev_event_id,
+               event_id,
                key_name="join_rule",
                public_value=JoinRules.PUBLIC,
            )
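_get_key_change itself is not shown in this diff; judging by the three call sites above and the later `if change is None:` check, it compares the named key in the previous and new event contents against public_value and reports the direction of change. A toy version under those assumptions (it takes content dicts rather than event IDs, so it stays self-contained):

def get_key_change(prev_content, new_content, key_name, public_value):
    # Returns True if the room flipped to the public value, False if it
    # flipped away from it, and None if nothing relevant changed.
    was_public = prev_content.get(key_name) == public_value
    is_public = new_content.get(key_name) == public_value
    if was_public == is_public:
        return None
    return is_public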
@@ -524,7 +532,7 @@ class UserDirectoryHandler(object):
            )
            if self.is_mine_id(other_user_id) and not is_appservice:
                shared_is_private = yield self.store.get_if_users_share_a_room(
-                   other_user_id, user_id,
+                   other_user_id, user_id
                )
                if shared_is_private is True:
                    # We've already marked in the database they share a private room
@@ -539,13 +547,11 @@ class UserDirectoryHandler(object):
                    to_insert.add((other_user_id, user_id))

        if to_insert:
-           yield self.store.add_users_who_share_room(
-               room_id, not is_public, to_insert,
-           )
+           yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)

        if to_update:
            yield self.store.update_users_who_share_room(
-               room_id, not is_public, to_update,
+               room_id, not is_public, to_update
            )

    @defer.inlineCallbacks
@@ -564,15 +570,15 @@ class UserDirectoryHandler(object):
        row = yield self.store.get_user_in_public_room(user_id)
        update_user_in_public = row and row["room_id"] == room_id

-       if (update_user_in_public or update_user_dir):
+       if update_user_in_public or update_user_dir:
            # XXX: Make this faster?
            rooms = yield self.store.get_rooms_for_user(user_id)
            for j_room_id in rooms:
-               if (not update_user_in_public and not update_user_dir):
+               if not update_user_in_public and not update_user_dir:
                    break

                is_in_room = yield self.store.is_host_joined(
-                   j_room_id, self.server_name,
+                   j_room_id, self.server_name
                )

                if not is_in_room:
@@ -600,19 +606,19 @@ class UserDirectoryHandler(object):
        # Get a list of user tuples that were in the DB due to this room and
        # users (this includes tuples where the other user matches `user_id`)
        user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
-           user_id, room_id,
+           user_id, room_id
        )

        for user_id, other_user_id in user_tuples:
            # For each user tuple get a list of rooms that they still share,
            # trying to find a private room, and update the entry in the DB
-           rooms = yield self.store.get_rooms_in_common_for_users(user_id, other_user_id)
+           rooms = yield self.store.get_rooms_in_common_for_users(
+               user_id, other_user_id
+           )

            # If they dont share a room anymore, remove the mapping
            if not rooms:
-               yield self.store.remove_user_who_share_room(
-                   user_id, other_user_id,
-               )
+               yield self.store.remove_user_who_share_room(user_id, other_user_id)
                continue

            found_public_share = None
@@ -626,13 +632,13 @@ class UserDirectoryHandler(object):
                else:
                    found_public_share = None
                    yield self.store.update_users_who_share_room(
-                       room_id, not is_public, [(user_id, other_user_id)],
+                       room_id, not is_public, [(user_id, other_user_id)]
                    )
                    break

            if found_public_share:
                yield self.store.update_users_who_share_room(
-                   room_id, not is_public, [(user_id, other_user_id)],
+                   room_id, not is_public, [(user_id, other_user_id)]
                )

    @defer.inlineCallbacks
@@ -660,7 +666,7 @@ class UserDirectoryHandler(object):

        if prev_name != new_name or prev_avatar != new_avatar:
            yield self.store.update_profile_in_user_dir(
-               user_id, new_name, new_avatar, room_id,
+               user_id, new_name, new_avatar, room_id
            )

    @defer.inlineCallbacks

@@ -15,8 +15,10 @@
# limitations under the License.
import re

+from twisted.internet import task
from twisted.internet.defer import CancelledError
from twisted.python import failure
+from twisted.web.client import FileBodyProducer

from synapse.api.errors import SynapseError

@@ -47,3 +49,16 @@ def redact_uri(uri):
        r'\1<redacted>\3',
        uri
    )


+class QuieterFileBodyProducer(FileBodyProducer):
+    """Wrapper for FileBodyProducer that avoids CRITICAL errors when the connection drops.
+
+    Workaround for https://github.com/matrix-org/synapse/issues/4003 /
+    https://twistedmatrix.com/trac/ticket/6528
+    """
+    def stopProducing(self):
+        try:
+            FileBodyProducer.stopProducing(self)
+        except task.TaskStopped:
+            pass
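For context on how the wrapper gets used (the client.py hunk further down does exactly this), here is the call shape in isolation. A sketch only: the function name and the agent/uri/headers parameters are placeholders, not code from the diff.

from io import BytesIO

import treq
from twisted.internet import defer

from synapse.http import QuieterFileBodyProducer


@defer.inlineCallbacks
def put_json_bytes(agent, uri, body_bytes, headers=None):
    # treq accepts an IBodyProducer wherever it accepts raw bytes. With the
    # wrapper, a connection dropped mid-upload surfaces as a swallowed
    # TaskStopped instead of a CRITICAL "Unhandled error" from Twisted.
    producer = QuieterFileBodyProducer(BytesIO(body_bytes))
    response = yield treq.request(
        "PUT", uri, agent=agent, data=producer, headers=headers,
    )
    defer.returnValue(response)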
@@ -15,6 +15,7 @@
# limitations under the License.

import logging
+from io import BytesIO

from six import text_type
from six.moves import urllib
@@ -39,7 +40,11 @@ from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers

from synapse.api.errors import Codes, HttpResponseException, SynapseError
-from synapse.http import cancelled_to_request_timed_out_error, redact_uri
+from synapse.http import (
+    QuieterFileBodyProducer,
+    cancelled_to_request_timed_out_error,
+    redact_uri,
+)
from synapse.util.async_helpers import timeout_deferred
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.logcontext import make_deferred_yieldable
@@ -246,7 +251,7 @@ class SimpleHttpClient(object):
        )

    @defer.inlineCallbacks
-   def request(self, method, uri, data=b'', headers=None):
+   def request(self, method, uri, data=None, headers=None):
        """
        Args:
            method (str): HTTP method to use.
@@ -265,11 +270,15 @@ class SimpleHttpClient(object):
        logger.info("Sending request %s %s", method, redact_uri(uri))

        try:
+           body_producer = None
+           if data is not None:
+               body_producer = QuieterFileBodyProducer(BytesIO(data))
+
            request_deferred = treq.request(
                method,
                uri,
                agent=self.agent,
-               data=data,
+               data=body_producer,
                headers=headers,
                **self._extra_treq_args
            )
@@ -32,7 +32,6 @@ from twisted.internet import defer, protocol
from twisted.internet.error import DNSLookupError
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
-from twisted.web.client import FileBodyProducer
from twisted.web.http_headers import Headers

import synapse.metrics
@@ -44,6 +43,7 @@ from synapse.api.errors import (
    RequestSendFailed,
    SynapseError,
)
+from synapse.http import QuieterFileBodyProducer
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.util.async_helpers import timeout_deferred
from synapse.util.logcontext import make_deferred_yieldable
@@ -168,7 +168,7 @@ class MatrixFederationHttpClient(object):
    requests.
    """

-   def __init__(self, hs):
+   def __init__(self, hs, tls_client_options_factory):
        self.hs = hs
        self.signing_key = hs.config.signing_key[0]
        self.server_name = hs.hostname
@@ -176,7 +176,7 @@ class MatrixFederationHttpClient(object):

        self.agent = MatrixFederationAgent(
            hs.get_reactor(),
-           hs.tls_client_options_factory,
+           tls_client_options_factory,
        )
        self.clock = hs.get_clock()
        self._store = hs.get_datastore()
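The TLS options factory is now injected rather than plucked off the HomeServer inside the constructor, which makes the client constructible with a stubbed factory in tests. The call site is not shown in this diff, but given the removed attribute access it presumably becomes something like:

# Hypothetical call site, inferred from the removed hs.tls_client_options_factory:
client = MatrixFederationHttpClient(hs, hs.tls_client_options_factory)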
@@ -286,7 +286,7 @@ class MatrixFederationHttpClient(object):
            json,
        )
        data = encode_canonical_json(json)
-       producer = FileBodyProducer(
+       producer = QuieterFileBodyProducer(
            BytesIO(data),
            cooperator=self._cooperator,
        )

@@ -106,10 +106,10 @@ def wrap_json_request_handler(h):
            # trace.
            f = failure.Failure()
            logger.error(
-               "Failed handle request via %r: %r: %s",
-               h,
+               "Failed handle request via %r: %r",
+               request.request_metrics.name,
                request,
-               f.getTraceback().rstrip(),
+               exc_info=(f.type, f.value, f.getTracebackObject()),
            )
            # Only respond with an error response if we haven't already started
            # writing, otherwise lets just kill the connection
@@ -169,18 +169,18 @@ def _return_html_error(f, request):
            )
        else:
            logger.error(
-               "Failed handle request %r: %s",
+               "Failed handle request %r",
                request,
-               f.getTraceback().rstrip(),
+               exc_info=(f.type, f.value, f.getTracebackObject()),
            )
    else:
        code = http_client.INTERNAL_SERVER_ERROR
        msg = "Internal server error"

        logger.error(
-           "Failed handle request %r: %s",
+           "Failed handle request %r",
            request,
-           f.getTraceback().rstrip(),
+           exc_info=(f.type, f.value, f.getTracebackObject()),
        )

    body = HTML_ERROR_TEMPLATE.format(

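The pattern being adopted here: instead of rendering the traceback into the message string, hand the logging framework the exception triple via exc_info and let handlers and formatters decide how to present it. A Twisted Failure carries all three parts. Standalone sketch (the request string is a placeholder):

import logging

from twisted.python import failure

logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError:
    f = failure.Failure()  # captures the in-flight exception
    # exc_info wants a (type, value, traceback) triple; Failure exposes each part
    logger.error(
        "Failed handle request %r",
        "GET /example",
        exc_info=(f.type, f.value, f.getTracebackObject()),
    )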
@@ -274,8 +274,6 @@ pending_calls_metric = Histogram(
# Federation Metrics
#

-sent_edus_counter = Counter("synapse_federation_client_sent_edus", "")
-
sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

events_processed_counter = Counter("synapse_federation_client_events_processed", "")
@@ -79,7 +79,7 @@ class ModuleApi(object):
        Returns:
            Deferred: a 2-tuple of (user_id, access_token)
        """
-       reg = self.hs.get_handlers().registration_handler
+       reg = self.hs.get_registration_handler()
        return reg.register(localpart=localpart)

    @defer.inlineCallbacks

@@ -32,9 +32,25 @@ if six.PY3:

logger = logging.getLogger(__name__)

-http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
+http_push_processed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_processed",
+    "Number of push notifications successfully sent",
+)

-http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "")
+http_push_failed_counter = Counter(
+    "synapse_http_httppusher_http_pushes_failed",
+    "Number of push notifications which failed",
+)

+http_badges_processed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_processed",
+    "Number of badge updates successfully sent",
+)
+
+http_badges_failed_counter = Counter(
+    "synapse_http_httppusher_badge_updates_failed",
+    "Number of badge updates which failed",
+)


class HttpPusher(object):
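The second argument to prometheus_client's Counter is the metric's help string, emitted as the # HELP line on /metrics; the empty strings these replace produced blank help text. Minimal usage, assuming the prometheus_client package (the metric name below is illustrative):

from prometheus_client import Counter, generate_latest

example_counter = Counter(
    "synapse_example_events",  # metric name (illustrative only)
    "Number of example events handled",  # help text shown on /metrics
)
example_counter.inc()  # increment by 1

# generate_latest() renders the default registry, including the # HELP line
print(generate_latest().decode("ascii"))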
@@ -81,6 +97,11 @@ class HttpPusher(object):
            pusherdict['pushkey'],
        )

+       if self.data is None:
+           raise PusherConfigException(
+               "data can not be null for HTTP pusher"
+           )
+
        if 'url' not in self.data:
            raise PusherConfigException(
                "'url' required in data for HTTP pusher"
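For orientation, the shape of pusherdict that these checks validate. The keys are the ones this diff actually reads ('kind', 'pushkey', 'data', 'url', plus 'user_name' and 'app_id' from the PusherPool hunk further down); the values are made up:

pusherdict = {
    'kind': 'http',
    'app_id': 'com.example.app',  # illustrative
    'pushkey': 'abc123',          # illustrative
    'user_name': '@alice:example.com',
    'data': {
        # 'url' is mandatory; a missing 'data' or 'url' now raises
        # PusherConfigException up front instead of failing later.
        'url': 'https://push.example.com/_matrix/push/v1/notify',
    },
}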
@@ -333,10 +354,10 @@ class HttpPusher(object):
            defer.returnValue([])
        try:
            resp = yield self.http_client.post_json_get_json(self.url, notification_dict)
-       except Exception:
-           logger.warn(
-               "Failed to push event %s to %s",
-               event.event_id, self.name, exc_info=True,
+       except Exception as e:
+           logger.warning(
+               "Failed to push event %s to %s: %s %s",
+               event.event_id, self.name, type(e), e,
            )
            defer.returnValue(False)
        rejected = []
@@ -346,6 +367,10 @@ class HttpPusher(object):

    @defer.inlineCallbacks
    def _send_badge(self, badge):
+       """
+       Args:
+           badge (int): number of unread messages
+       """
        logger.info("Sending updated badge count %d to %s", badge, self.name)
        d = {
            'notification': {
@@ -366,14 +391,11 @@ class HttpPusher(object):
                }
            }
        try:
-           resp = yield self.http_client.post_json_get_json(self.url, d)
-       except Exception:
-           logger.warn(
-               "Failed to send badge count to %s",
-               self.name, exc_info=True,
+           yield self.http_client.post_json_get_json(self.url, d)
+           http_badges_processed_counter.inc()
+       except Exception as e:
+           logger.warning(
+               "Failed to send badge count to %s: %s %s",
+               self.name, type(e), e,
            )
-           defer.returnValue(False)
-       rejected = []
-       if 'rejected' in resp:
-           rejected = resp['rejected']
-       defer.returnValue(rejected)
+           http_badges_failed_counter.inc()
@@ -52,11 +52,12 @@ class PusherFactory(object):
        logger.info("defined email pusher type")

    def create_pusher(self, pusherdict):
-       logger.info("trying to create_pusher for %r", pusherdict)
-
-       if pusherdict['kind'] in self.pusher_types:
-           logger.info("found pusher")
-           return self.pusher_types[pusherdict['kind']](self.hs, pusherdict)
+       kind = pusherdict['kind']
+       f = self.pusher_types.get(kind, None)
+       if not f:
+           return None
+       logger.debug("creating %s pusher for %r", kind, pusherdict)
+       return f(self.hs, pusherdict)

    def _create_email_pusher(self, _hs, pusherdict):
        app_name = self._app_name_from_pusherdict(pusherdict)
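The rewrite is the standard guarded-lookup idiom: a single dict.get answers both "is this kind known?" and "which constructor?". In miniature, with a lambda standing in for the real pusher classes (purely illustrative):

pusher_types = {
    'http': lambda hs, pusherdict: 'an HttpPusher instance',  # stand-in
}

def create_pusher(hs, pusherdict):
    f = pusher_types.get(pusherdict['kind'], None)
    if not f:
        return None  # unknown kind: the caller treats None as "no pusher"
    return f(hs, pusherdict)

assert create_pusher(None, {'kind': 'http'}) == 'an HttpPusher instance'
assert create_pusher(None, {'kind': 'email'}) is None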
@@ -19,6 +19,7 @@ import logging
from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.push import PusherConfigException
from synapse.push.pusher import PusherFactory

logger = logging.getLogger(__name__)
@@ -140,6 +141,10 @@ class PusherPool:

    @defer.inlineCallbacks
    def on_new_notifications(self, min_stream_id, max_stream_id):
+       if not self.pushers:
+           # nothing to do here.
+           return
+
        try:
            users_affected = yield self.store.get_push_action_users_in_range(
                min_stream_id, max_stream_id
@@ -155,6 +160,10 @@ class PusherPool:

    @defer.inlineCallbacks
    def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
+       if not self.pushers:
+           # nothing to do here.
+           return
+
        try:
            # Need to subtract 1 from the minimum because the lower bound here
            # is not inclusive
@@ -214,6 +223,15 @@ class PusherPool:
        """
        try:
            p = self.pusher_factory.create_pusher(pusherdict)
+       except PusherConfigException as e:
+           logger.warning(
+               "Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
+               pusherdict.get('user_name'),
+               pusherdict.get('app_id'),
+               pusherdict.get('pushkey'),
+               e,
+           )
+           return
        except Exception:
            logger.exception("Couldn't start a pusher: caught Exception")
            return
@@ -85,7 +85,8 @@ CONDITIONAL_REQUIREMENTS = {

    "saml2": ["pysaml2>=4.5.0"],
    "url_preview": ["lxml>=3.5.0"],
-   "test": ["mock>=2.0"],
+   "test": ["mock>=2.0", "parameterized"],
+   "sentry": ["sentry-sdk>=0.7.2"],
}

@@ -143,9 +144,12 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
    for dependency in OPTS:
        try:
            _get_distribution(dependency)
-       except VersionConflict:
+       except VersionConflict as e:
            deps_needed.append(dependency)
-           errors.append("Needed %s but it was not installed" % (dependency,))
+           errors.append(
+               "Needed optional %s, got %s==%s"
+               % (dependency, e.dist.project_name, e.dist.version)
+           )
        except DistributionNotFound:
            # If it's not found, we don't care
            pass

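pkg_resources.get_distribution raises VersionConflict when a package is installed but fails the version constraint, and the exception carries the offending distribution, which is what makes the richer error message above possible. Standalone sketch:

from pkg_resources import DistributionNotFound, VersionConflict, get_distribution

try:
    get_distribution("mock>=2.0")
except VersionConflict as e:
    # e.dist is the distribution that is installed but does not satisfy the
    # requirement
    print("Needed mock>=2.0, got %s==%s" % (e.dist.project_name, e.dist.version))
except DistributionNotFound:
    print("mock is not installed at all")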
@@ -14,7 +14,7 @@
# limitations under the License.

from synapse.http.server import JsonResource
-from synapse.replication.http import federation, membership, send_event
+from synapse.replication.http import federation, login, membership, register, send_event

REPLICATION_PREFIX = "/_synapse/replication"

@@ -28,3 +28,5 @@ class ReplicationRestResource(JsonResource):
        send_event.register_servlets(hs, self)
        membership.register_servlets(hs, self)
        federation.register_servlets(hs, self)
+       login.register_servlets(hs, self)
+       register.register_servlets(hs, self)

synapse/replication/http/login.py (new file, 74 lines)
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint

logger = logging.getLogger(__name__)


class RegisterDeviceReplicationServlet(ReplicationEndpoint):
    """Ensure a device is registered, generating a new access token for the
    device.

    Used during registration and login.
    """

    NAME = "device_check_registered"
    PATH_ARGS = ("user_id",)

    def __init__(self, hs):
        super(RegisterDeviceReplicationServlet, self).__init__(hs)
        self.registration_handler = hs.get_registration_handler()

    @staticmethod
    def _serialize_payload(user_id, device_id, initial_display_name, is_guest):
        """
        Args:
            device_id (str|None): Device ID to use, if None a new one is
                generated.
            initial_display_name (str|None)
            is_guest (bool)
        """
        return {
            "device_id": device_id,
            "initial_display_name": initial_display_name,
            "is_guest": is_guest,
        }

    @defer.inlineCallbacks
    def _handle_request(self, request, user_id):
        content = parse_json_object_from_request(request)

        device_id = content["device_id"]
        initial_display_name = content["initial_display_name"]
        is_guest = content["is_guest"]

        device_id, access_token = yield self.registration_handler.register_device(
            user_id, device_id, initial_display_name, is_guest,
        )

        defer.returnValue((200, {
            "device_id": device_id,
            "access_token": access_token,
        }))


def register_servlets(hs, http_server):
    RegisterDeviceReplicationServlet(hs).register(http_server)
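How a worker would invoke this endpoint, as a sketch. ReplicationEndpoint lives in the _base module imported above but is not shown in this diff, so the make_client helper and its exact return shape are assumptions here; the payload keys come from _serialize_payload and the response keys from _handle_request:

# Assumed helper from synapse.replication.http._base (not shown in this diff):
# make_client(hs) builds a callable that POSTs the serialized payload to
# /_synapse/replication/device_check_registered/<user_id> on the master.
register_device_client = RegisterDeviceReplicationServlet.make_client(hs)

result = yield register_device_client(
    user_id=user_id,
    device_id=device_id,
    initial_display_name=initial_display_name,
    is_guest=is_guest,
)
device_id, access_token = result["device_id"], result["access_token"]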
Some files were not shown because too many files have changed in this diff.