Compare commits

v0.34.0.1 ... anoa/insta (383 commits)
Commits (SHA1):

9690e9dbac, 6fba9fd20c, b8b898666e, ad7ac8853c, c74b96755c, fbaee26c68, d534a27fe8,
81b7e7eed3, e25ab58c5e, ed8c5e4cda, a5d0c771a3, 6587b0b89b, a4f52a33fe, 7615a8ced1,
43c6fca960, e87d7a4b0f, e6a7a15f93, 270f212a2a, a79034aedf, c7b24ac3d0, f6813919e8,
283753c33a, 09a1a6b55e, 928c50b59a, b37e8c9572, f834d98402, bc5f6e1797, 3f189c902e,
ee4df7fd7a, c5a0f82cca, 9c850d9d5e, 2f46804055, c7837dce24, 457fbfaf22, 2a7f0b8953,
47d03a79fc, 0b24d58e05, ebcffbc3eb, a1b0e1879b, e12313ba25, 67b82f1336, afeea319df,
7740eddd04, ff2f65d737, 655ce037fd, f46a818ce5, a696c48133, c21b7cbc09, 4db252c073,
6d23ec2111, b5d510ad64, 6f9cdc2d47, 47e2dd1994, 38590a4870, 82165eeb05, 03b086647f,
3680bc18e9, 84af577356, e4bef9d470, b40abe0724, 610f0830b0, 840068bd78, 8e3d34e3c5,
55d9024835, cc2d650ef7, b1fffca345, 770b823445, d02c5ccb11, 19259d903c, f03b3a7a3a,
9adbc912b3, e65a17b26f, 6bd4374636, b8d75ef53e, 99e36d5e24, b82a76c384, 2562319821,
4eeb2fb215, 5891a6edc8, fb99dae9c8, 5488cadaae, 64c1bd1d21, 7d1024d574, 7709d2bd16,
7a3ec5b022, 0c55b7701c, 6598992b01, a124025dab, ff37acb8ce, aee39f7de8, 5180f12bae,
a388d59d44, be47cfa9c9, 554ca58ea1, f815bd7feb, 073f6c2e5e, 40638ae7f5, 9fa3c6ffa3,
28efc80723, b6b73a0bcf, 327b992e17, 94fb63e44f, 17709f8f9c, f2b553d656, f1a04462eb,
c4045647eb, 6901ac7e9d, 8cbc99cc19, d414f30019, d758d5310e, 5d976c0c7c, 1977a9b006,
b872c7b1b4, b8bea3424f, 855a151015, 7072fe3084, 88f4df85ca, f0e96ab66a, 4026d555fa,
1ce463963d, 57a3e96e8e, d04ff2a03d, 3bd0f1a4a3, ff05ad147a, 0fd5b3b53e, 51958df766,
d840019192, f01c7488ab, ae2a957dba, 893107be78, b6dce9b9fd, 57c035debe, 431e485914,
4a3f138832, 8520bc3109, 9244a3089e, da0d2219d2, 8265995498, 0b3fd1401f, c4cdafa81f,
516456b763, 6f3fda79ce, 95f871fc0d, a3f0556bea, 821b65aeb5, 0862d35b8e, 53ef4da8c2,
b1b6dba2d2, 8dcfa6e75c, 1953067136, 5d881cbcb8, 678a92cb56, 9770ed91c2, a50cf929c1,
28c21cd578, 829a7b2032, be6a7e47fa, 03b7df1af2, 62514bb81b, efb8ed1d45, edc1e21dbe,
80bcca659e, 664b7a2920, d148c43050, 26f44164c8, 5ee1f997a8, 9139b87be4, b8082a5445,
a383289b0d, e1781b043b, 03c85335d1, 8ea509a935, 50c396a7ee, 075ff3ede9, 5b1dc94083,
a2d85144e5, 4a6e863843, 8c58c10697, f4697b5ec1, afd69a0920, e1c8440e0c, 10b89d5c2e,
2a360e834f, 5541645e80, 068aa1d228, 5c41b22359, 0e27501ee5, e8c9f15397, 1f2058fca5,
92d8a068ad, 58f6c48183, 97fd29c019, e79ba9eb34, 073173277c, f431ff3fb8, 17898a5ba6,
7c288c2250, 07f62da55a, 183738f469, 886e5acc76, 2a8edbaf74, fd654a4d54, c5a125b24f,
4a8b715679, a4ef8d8dd5, 6a41d2a187, 67cd4dad81, 6b90ae6efc, a0ae475219, cb0e637a94,
82a92ba535, be1065af59, 2f88881c93, 4cd50d983d, c5a296b10c, 6b574f3df7, 90743c9d89,
6129e52f43, d02c4532c0, 7021784d46, 53a327b4d5, c66f4bf7f1, fe212bbe4a, 7871146667,
44be7513bf, c99c2d58d7, d619b113ed, 12699a701f, 7f503f83b9, 388c164aea, 1c9704f8ab,
766a172b99, c658425e6f, 2557531f0f, 117bc94cd2, 277e50462d, c4875d8c76, c433f61091,
c9bfb058d8, 8086a5c05e, 6bfa735a69, 48951f437f, 8c85f0833d, 25d64a846a, ea8903fcc9,
887ca93a1b, 1f18c7cfc9, 4ff6d22245, 75942af1db, 33a55289cb, 23b0813599, 1b53cc3cb4,
5349262302, 83f335bedf, 35e1d67b4e, 5f54765587, 702c4b750c, 25dd56ace3, 71b94eac46,
de6888e7ce, cb80db8941, df3a661e4a, 676cf2ee26, 9feb5d0b71, 3982a6ee07, 05e1296649,
f788c9eb70, 7f1a6a4ea5, 3b31a54a6e, 64cf6788d9, fab948120f, aa955f2d15, 9ec56d6935,
4a4d2e17bc, 1a8f4139a5, bb63e7ca4f, 7e41545e8b, 4fd051f9c3, b5b868d41e, 34b25dcc8e,
a35c66a00b, 91fa34b3fa, 522dada206, ea00f18135, c0dba73aa0, 8c818af38e, 5c792ee5c3,
6dc06c3775, 3933ce9f13, de80e979c9, e0910d0145, ba41aeed6a, 4f24452ead, aa70d24125,
efc522c55e, 566947ff34, 353f2407b7, 8d4b4e781f, 95fca1c7e9, 58fe88c47e, 2394e832a8,
d566e6b17a, 998f5225c1, 55c3e853c5, 484867d35d, 7960c26fda, 0dce21ba77, 34ea14139d,
d1d81d0651, d91b99abe0, d422570e9b, c4530b97b2, 83c50bf752, 1371d5b798, 32172f2297,
ba85786d71, bc1a4b5576, b970cb0e96, 1dcb086f33, 26e5abf20d, bc1fa8cd01, b7c0218812,
a27e501b09, 29f20a8a1a, 27128145e6, dd3bf40152, 37f8bdc1d5, f95581332f, 84b6fae1f5,
265513e499, 252c0c81fa, e9cdfedff3, 7134832c01, 7975d39cbd, 0e62fcd0eb, d7843f47b6,
98df67a8de, 0708f437cc, cd018e3543, f85676cc93, 3355c1a9ec, 597dafb5e8, 9c2af7b2c5,
f3561f8d86, ea6abf6724, c26f49a664, 48b7ff7a35, d9aaf26539, c8d32caba3, fc9cdbabe7,
5dd9a381c5, 0b26feb422, 3b2ba2fbb8, da8628ba2c, 81b513416e, 668e6625b0, 1dc7492ce5,
df89f8afb8, 7e22cd90f5, d2f7c4e6b1, e93a0ebf50, cef8ae272a, a83826ae99, 5986a10f16,
f208f608cb, 89ac2a5bdb, 989f1167af, 9b1c19e0c5, 74c3606c53, 25219b7b4e, fee831c040,
466c1f3e01, 91206e09f2, dbf736ba66, 912a843294, 5a1c6f45be, 30da50a5b8, 35e13477cf,
c7401a697f, c588b9b9e4, b0c24a66ec, 9a3e24a13d, e8d98466b0
.circleci/config.yml

@@ -13,8 +13,8 @@ jobs:
     machine: true
     steps:
       - checkout
-      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_SHA1} .
-      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
+      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:latest
       - run: docker push matrixdotorg/synapse:latest-py3
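To sanity-check locally what this job produces, the `gitsha1` label can be read back from a built image. A sketch, not part of the CI config, using only standard docker commands:

```bash
# Build the image the same way the CI job does, labelled with the current
# git revision, then read the label back out of the image metadata.
docker build -f docker/Dockerfile --label gitsha1=$(git rev-parse HEAD) \
    -t matrixdotorg/synapse:latest .
docker inspect --format '{{ index .Config.Labels "gitsha1" }}' \
    matrixdotorg/synapse:latest
```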
.codecov.yml (new file, 15 lines)

@@ -0,0 +1,15 @@
+comment:
+  layout: "diff"
+
+coverage:
+  status:
+    project:
+      default:
+        target: 0  # Target % coverage, can be auto. Turned off for now
+        threshold: null
+        base: auto
+    patch:
+      default:
+        target: 0
+        threshold: null
+        base: auto
.coveragerc

@@ -1,12 +1,7 @@
 [run]
 branch = True
 parallel = True
-source = synapse
-
-[paths]
-source=
-    coverage
+include = synapse/*
 
 [report]
 precision = 2
-ignore_errors = True
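Because `parallel = True` makes coverage.py write one data file per process, a combine step is needed before reporting. A minimal sketch of running the test suite under this configuration, assuming coverage.py is installed and the command is run from the repository root:

```bash
pip install coverage
# Each process writes its own .coverage.* data file (parallel = True).
coverage run -m twisted.trial tests
# Merge the per-process data files into a single .coverage file.
coverage combine
# Report percentages to two decimal places, per "precision = 2" above.
coverage report
```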
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 lines)

@@ -3,5 +3,5 @@
 <!-- Please read CONTRIBUTING.rst before submitting your pull request -->
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](CONTRIBUTING.rst#changelog)
-* [ ] Pull request includes a [sign off](CONTRIBUTING.rst#sign-off)
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
+* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
.gitignore (vendored, 9 lines)

@@ -12,6 +12,7 @@ dbs/
 dist/
 docs/build/
 *.egg-info
+pip-wheel-metadata/
 
 cmdclient_config.json
 homeserver*.db

@@ -25,9 +26,9 @@ homeserver*.pid
 *.tls.dh
 *.tls.key
 
-.coverage
-.coverage.*
-!.coverage.rc
+.coverage*
+coverage.*
+!.coveragerc
 htmlcov
 
 demo/*/*.db

@@ -59,7 +60,5 @@ env/
 
 .vscode/
 .ropeproject/
 
 *.deb
 
 /debs
.travis.yml (10 lines)

@@ -12,6 +12,9 @@ cache:
   #
   - $HOME/.cache/pip/wheels
 
 addons:
   postgresql: "9.4"
 
+# don't clone the whole repo history, one commit will do
+git:
+  depth: 1

@@ -68,6 +71,13 @@ matrix:
 
 install:
   - pip install tox
 
+  # if we don't have python3.6 in this environment, travis unhelpfully gives us
+  # a `python3.6` on our path which does nothing but spit out a warning. Tox
+  # tries to run it (even if we're not running a py36 env), so the build logs
+  # then have warnings which look like errors. To reduce the noise, remove the
+  # non-functional python3.6.
+  - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
+
 script:
   - tox -e $TOX_ENV
AUTHORS.rst

@@ -65,4 +65,7 @@ Pierre Jaury <pierre at jaury.eu>
  * Docker packaging
 
 Serban Constantin <serban.constantin at gmail dot com>
  * Small bug fix
+
+Jason Robinson <jasonr at matrix.org>
+ * Minor fixes
CHANGES.md (166 lines changed)

@@ -1,13 +1,167 @@
-Synapse 0.34.0.1 (2019-01-11)
-=============================
-
-This release fixes CVE-2019-5885 and is recommended for all users of Synapse 0.34.0 and below.
+Synapse 0.99.0rc2 (2019-01-30)
+==============================
+
+Bugfixes
+--------
+
+- Fix problem reading macaroon_secret_key from config
+  ([\#4373](https://github.com/matrix-org/synapse/issues/4373))
+- Fix bug when rejecting remote invites. ([\#4527](https://github.com/matrix-org/synapse/issues/4527))
+- Fix incorrect rendering of server capabilities. ([81b7e7eed](https://github.com/matrix-org/synapse/commit/81b7e7eed323f55d6550e7a270a9dc2c4c7b0fe0))
+
+Improved Documentation
+----------------------
+
+- Add documentation on enabling ACME support when upgrading to v0.99. ([\#4528](https://github.com/matrix-org/synapse/issues/4528))
+
+
+Synapse 0.99.0rc1 (2019-01-30)
+==============================
+
+Synapse v0.99.x is a precursor to the upcoming Synapse v1.0 release. It contains foundational changes to room architecture and the federation security model necessary to support the upcoming r0 release of the Server to Server API.
+
+Features
+--------
+
+- Synapse's cipher string has been updated to require ECDH key exchange. Configuring and generating dh_params is no longer required, and they will be ignored. ([\#4229](https://github.com/matrix-org/synapse/issues/4229))
+- Synapse can now automatically provision TLS certificates via ACME (the protocol used by CAs like Let's Encrypt). ([\#4384](https://github.com/matrix-org/synapse/issues/4384), [\#4492](https://github.com/matrix-org/synapse/issues/4492), [\#4525](https://github.com/matrix-org/synapse/issues/4525))
+- Implement MSC1708 (.well-known routing for server-server federation) ([\#4408](https://github.com/matrix-org/synapse/issues/4408), [\#4409](https://github.com/matrix-org/synapse/issues/4409), [\#4426](https://github.com/matrix-org/synapse/issues/4426), [\#4427](https://github.com/matrix-org/synapse/issues/4427), [\#4428](https://github.com/matrix-org/synapse/issues/4428), [\#4464](https://github.com/matrix-org/synapse/issues/4464), [\#4468](https://github.com/matrix-org/synapse/issues/4468), [\#4487](https://github.com/matrix-org/synapse/issues/4487), [\#4488](https://github.com/matrix-org/synapse/issues/4488), [\#4489](https://github.com/matrix-org/synapse/issues/4489), [\#4497](https://github.com/matrix-org/synapse/issues/4497), [\#4511](https://github.com/matrix-org/synapse/issues/4511), [\#4516](https://github.com/matrix-org/synapse/issues/4516), [\#4520](https://github.com/matrix-org/synapse/issues/4520), [\#4521](https://github.com/matrix-org/synapse/issues/4521))
+- Search now includes results from predecessor rooms after a room upgrade. ([\#4415](https://github.com/matrix-org/synapse/issues/4415))
+- Config option to disable requesting MSISDN on registration. ([\#4423](https://github.com/matrix-org/synapse/issues/4423))
+- Add a metric for tracking event stream position of the user directory. ([\#4445](https://github.com/matrix-org/synapse/issues/4445))
+- Support exposing server capabilities in CS API (MSC1753, MSC1804) ([\#4472](https://github.com/matrix-org/synapse/issues/4472))
+- Add support for room version 3 ([\#4483](https://github.com/matrix-org/synapse/issues/4483), [\#4499](https://github.com/matrix-org/synapse/issues/4499), [\#4515](https://github.com/matrix-org/synapse/issues/4515), [\#4523](https://github.com/matrix-org/synapse/issues/4523))
+- Synapse will now reload TLS certificates from disk upon SIGHUP. ([\#4495](https://github.com/matrix-org/synapse/issues/4495), [\#4524](https://github.com/matrix-org/synapse/issues/4524))
+
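To illustrate the MSC1708 `.well-known` routing feature listed above: a domain can delegate its federation traffic by publishing a small JSON document. A hedged sketch with placeholder hostnames (the `m.server` key is the one defined by MSC1708):

```bash
# Fetch the delegation document for a homeserver domain. A deployment that
# delegates example.com's federation to synapse.example.com:443 would serve:
curl -s https://example.com/.well-known/matrix/server
# Expected response body:
# {"m.server": "synapse.example.com:443"}
```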
+Bugfixes
+--------
+
+- Prevent users with access tokens predating the introduction of device IDs from creating spurious entries in the user_ips table. ([\#4369](https://github.com/matrix-org/synapse/issues/4369))
+- Fix typo in ALL_USER_TYPES definition to ensure type is a tuple ([\#4392](https://github.com/matrix-org/synapse/issues/4392))
+- Fix high CPU usage due to remote devicelist updates ([\#4397](https://github.com/matrix-org/synapse/issues/4397))
+- Fix potential bug where creating or joining a room could fail ([\#4404](https://github.com/matrix-org/synapse/issues/4404))
+- Fix bug when rejecting remote invites ([\#4405](https://github.com/matrix-org/synapse/issues/4405))
+- Fix incorrect logcontexts after a Deferred was cancelled ([\#4407](https://github.com/matrix-org/synapse/issues/4407))
+- Ensure encrypted room state is persisted across room upgrades. ([\#4411](https://github.com/matrix-org/synapse/issues/4411))
+- Copy over whether a room is a direct message and any associated room tags on room upgrade. ([\#4412](https://github.com/matrix-org/synapse/issues/4412))
+- Fix None guard in calling config.server.is_threepid_reserved ([\#4435](https://github.com/matrix-org/synapse/issues/4435))
+- Don't send IP addresses as SNI ([\#4452](https://github.com/matrix-org/synapse/issues/4452))
+- Fix UnboundLocalError in post_urlencoded_get_json ([\#4460](https://github.com/matrix-org/synapse/issues/4460))
+- Add a timeout to filtered room directory queries. ([\#4461](https://github.com/matrix-org/synapse/issues/4461))
+- Workaround for login error when using both LDAP and internal authentication. ([\#4486](https://github.com/matrix-org/synapse/issues/4486))
+- Fix a bug where setting a relative consent directory path would cause a crash. ([\#4512](https://github.com/matrix-org/synapse/issues/4512))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse no longer generates self-signed TLS certificates when generating a configuration file. ([\#4509](https://github.com/matrix-org/synapse/issues/4509))
+
+
+Internal Changes
+----------------
+
+- Synapse will now take advantage of native UPSERT functionality in PostgreSQL 9.5+ and SQLite 3.24+. ([\#4306](https://github.com/matrix-org/synapse/issues/4306), [\#4459](https://github.com/matrix-org/synapse/issues/4459), [\#4466](https://github.com/matrix-org/synapse/issues/4466), [\#4471](https://github.com/matrix-org/synapse/issues/4471), [\#4477](https://github.com/matrix-org/synapse/issues/4477), [\#4505](https://github.com/matrix-org/synapse/issues/4505))
+- Update README to use the new virtualenv everywhere ([\#4342](https://github.com/matrix-org/synapse/issues/4342))
+- Add better logging for unexpected errors while sending transactions ([\#4368](https://github.com/matrix-org/synapse/issues/4368))
+- Apply a unique index to the user_ips table, preventing duplicates. ([\#4370](https://github.com/matrix-org/synapse/issues/4370), [\#4432](https://github.com/matrix-org/synapse/issues/4432), [\#4434](https://github.com/matrix-org/synapse/issues/4434))
+- Silence travis-ci build warnings by removing non-functional python3.6 ([\#4377](https://github.com/matrix-org/synapse/issues/4377))
+- Fix a comment in the generated config file ([\#4387](https://github.com/matrix-org/synapse/issues/4387))
+- Add ground work for implementing future federation API versions ([\#4390](https://github.com/matrix-org/synapse/issues/4390))
+- Update dependencies on msgpack and pymacaroons to use the up-to-date packages. ([\#4399](https://github.com/matrix-org/synapse/issues/4399))
+- Tweak codecov settings to make them less loud. ([\#4400](https://github.com/matrix-org/synapse/issues/4400))
+- Implement server support for MSC1794 - Federation v2 Invite API ([\#4402](https://github.com/matrix-org/synapse/issues/4402))
+- debian package: symlink to explicit python version ([\#4433](https://github.com/matrix-org/synapse/issues/4433))
+- Add infrastructure to support different event formats ([\#4437](https://github.com/matrix-org/synapse/issues/4437), [\#4447](https://github.com/matrix-org/synapse/issues/4447), [\#4448](https://github.com/matrix-org/synapse/issues/4448), [\#4470](https://github.com/matrix-org/synapse/issues/4470), [\#4481](https://github.com/matrix-org/synapse/issues/4481), [\#4482](https://github.com/matrix-org/synapse/issues/4482), [\#4493](https://github.com/matrix-org/synapse/issues/4493), [\#4494](https://github.com/matrix-org/synapse/issues/4494), [\#4496](https://github.com/matrix-org/synapse/issues/4496), [\#4510](https://github.com/matrix-org/synapse/issues/4510), [\#4514](https://github.com/matrix-org/synapse/issues/4514))
+- Generate the debian config during build ([\#4444](https://github.com/matrix-org/synapse/issues/4444))
+- Clarify documentation for the `public_baseurl` config param ([\#4458](https://github.com/matrix-org/synapse/issues/4458), [\#4498](https://github.com/matrix-org/synapse/issues/4498))
+- Fix quoting for allowed_local_3pids example config ([\#4476](https://github.com/matrix-org/synapse/issues/4476))
+- Remove deprecated --process-dependency-links option from UPGRADE.rst ([\#4485](https://github.com/matrix-org/synapse/issues/4485))
+- Make it possible to set the log level for tests via an environment variable ([\#4506](https://github.com/matrix-org/synapse/issues/4506))
+- Reduce the log level of linearizer lock acquirement to DEBUG. ([\#4507](https://github.com/matrix-org/synapse/issues/4507))
+- Fix code to comply with linting in PyFlakes 3.7.1. ([\#4519](https://github.com/matrix-org/synapse/issues/4519))
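The native UPSERT entry above depends on `INSERT ... ON CONFLICT`, which is available in PostgreSQL 9.5+ and SQLite 3.24+. A standalone sketch of the statement shape against a throwaway SQLite database (the table is illustrative, not Synapse's schema):

```bash
sqlite3 demo.db "CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT);"
# The second insert hits the primary-key conflict and updates in place
# instead of failing, which is what the UPSERT work relies on.
sqlite3 demo.db "INSERT INTO kv (k, v) VALUES ('a', '1')
                 ON CONFLICT (k) DO UPDATE SET v = excluded.v;"
sqlite3 demo.db "INSERT INTO kv (k, v) VALUES ('a', '2')
                 ON CONFLICT (k) DO UPDATE SET v = excluded.v;"
sqlite3 demo.db "SELECT v FROM kv WHERE k = 'a';"   # prints 2
```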
+
+
+Synapse 0.34.1.1 (2019-01-11)
+=============================
+
+This release fixes CVE-2019-5885 and is recommended for all users of Synapse 0.34.1.
+
+This release is compatible with Python 2.7 and 3.5+. Python 3.7 is fully supported.
+
+Bugfixes
+--------
+
+- Fix spontaneous logout on upgrade
+  ([\#4374](https://github.com/matrix-org/synapse/issues/4374))
+
+
+Synapse 0.34.1 (2019-01-09)
+===========================
+
+Internal Changes
+----------------
+
+- Add better logging for unexpected errors while sending transactions ([\#4361](https://github.com/matrix-org/synapse/issues/4361), [\#4362](https://github.com/matrix-org/synapse/issues/4362))
+
+
+Synapse 0.34.1rc1 (2019-01-08)
+==============================
+
+Features
+--------
+
+- Special-case a support user for use in verifying behaviour of a given server. The support user does not appear in user directory or monthly active user counts. ([\#4141](https://github.com/matrix-org/synapse/issues/4141), [\#4344](https://github.com/matrix-org/synapse/issues/4344))
+- Support for serving .well-known files ([\#4262](https://github.com/matrix-org/synapse/issues/4262))
+- Rework SAML2 authentication ([\#4265](https://github.com/matrix-org/synapse/issues/4265), [\#4267](https://github.com/matrix-org/synapse/issues/4267))
+- SAML2 authentication: Initialise user display name from SAML2 data ([\#4272](https://github.com/matrix-org/synapse/issues/4272))
+- Synapse can now have its conditional/extra dependencies installed by pip. This functionality can be used by using `pip install matrix-synapse[feature]`, where feature is a comma separated list with the possible values `email.enable_notifs`, `matrix-synapse-ldap3`, `postgres`, `resources.consent`, `saml2`, `url_preview`, and `test`. If you want to install all optional dependencies, you can use "all" instead. ([\#4298](https://github.com/matrix-org/synapse/issues/4298), [\#4325](https://github.com/matrix-org/synapse/issues/4325), [\#4327](https://github.com/matrix-org/synapse/issues/4327))
+- Add routes for reading account data. ([\#4303](https://github.com/matrix-org/synapse/issues/4303))
+- Add opt-in support for v2 rooms ([\#4307](https://github.com/matrix-org/synapse/issues/4307))
+- Add a script to generate a clean config file ([\#4315](https://github.com/matrix-org/synapse/issues/4315))
+- Return server data in /login response ([\#4319](https://github.com/matrix-org/synapse/issues/4319))
+
+
+Bugfixes
+--------
+
+- Fix contains_url check to be consistent with other instances in code-base and check that value is an instance of string. ([\#3405](https://github.com/matrix-org/synapse/issues/3405))
+- Fix CAS login when username is not valid in an MXID ([\#4264](https://github.com/matrix-org/synapse/issues/4264))
+- Send CORS headers for /media/config ([\#4279](https://github.com/matrix-org/synapse/issues/4279))
+- Add 'sandbox' to CSP for media reprository ([\#4284](https://github.com/matrix-org/synapse/issues/4284))
+- Make the new landing page prettier. ([\#4294](https://github.com/matrix-org/synapse/issues/4294))
+- Fix deleting E2E room keys when using old SQLite versions. ([\#4295](https://github.com/matrix-org/synapse/issues/4295))
+- The metric synapse_admin_mau:current previously did not update when config.mau_stats_only was set to True ([\#4305](https://github.com/matrix-org/synapse/issues/4305))
+- Fixed per-room account data filters ([\#4309](https://github.com/matrix-org/synapse/issues/4309))
+- Fix indentation in default config ([\#4313](https://github.com/matrix-org/synapse/issues/4313))
+- Fix synapse:latest docker upload ([\#4316](https://github.com/matrix-org/synapse/issues/4316))
+- Fix test_metric.py compatibility with prometheus_client 0.5. Contributed by Maarten de Vries <maarten@de-vri.es>. ([\#4317](https://github.com/matrix-org/synapse/issues/4317))
+- Avoid packaging _trial_temp directory in -py3 debian packages ([\#4326](https://github.com/matrix-org/synapse/issues/4326))
+- Check jinja version for consent resource ([\#4327](https://github.com/matrix-org/synapse/issues/4327))
+- fix NPE in /messages by checking if all events were filtered out ([\#4330](https://github.com/matrix-org/synapse/issues/4330))
+- Fix `python -m synapse.config` on Python 3. ([\#4356](https://github.com/matrix-org/synapse/issues/4356))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the deprecated v1/register API on Python 2. It was never ported to Python 3. ([\#4334](https://github.com/matrix-org/synapse/issues/4334))
+
+
+Internal Changes
+----------------
+
+- Getting URL previews of IP addresses no longer fails on Python 3. ([\#4215](https://github.com/matrix-org/synapse/issues/4215))
+- drop undocumented dependency on dateutil ([\#4266](https://github.com/matrix-org/synapse/issues/4266))
+- Update the example systemd config to use a virtualenv ([\#4273](https://github.com/matrix-org/synapse/issues/4273))
+- Update link to kernel DCO guide ([\#4274](https://github.com/matrix-org/synapse/issues/4274))
+- Make isort tox check print diff when it fails ([\#4283](https://github.com/matrix-org/synapse/issues/4283))
+- Log room_id in Unknown room errors ([\#4297](https://github.com/matrix-org/synapse/issues/4297))
+- Documentation improvements for coturn setup. Contributed by Krithin Sitaram. ([\#4333](https://github.com/matrix-org/synapse/issues/4333))
+- Update pull request template to use absolute links ([\#4341](https://github.com/matrix-org/synapse/issues/4341))
+- Update README to not lie about required restart when updating TLS certificates ([\#4343](https://github.com/matrix-org/synapse/issues/4343))
+- Update debian packaging for compatibility with transitional package ([\#4349](https://github.com/matrix-org/synapse/issues/4349))
+- Fix command hint to generate a config file when trying to start without a config file ([\#4353](https://github.com/matrix-org/synapse/issues/4353))
+- Add better logging for unexpected errors while sending transactions ([\#4358](https://github.com/matrix-org/synapse/issues/4358))
+
+
 Synapse 0.34.0 (2018-12-20)
CONTRIBUTING.rst

@@ -102,7 +102,7 @@ Sign off
 
 In order to have a concrete record that your contribution is intentional
 and you agree to license it under the same terms as the project's license, we've adopted the
 same lightweight approach that the Linux Kernel
-(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
+`submitting patches process <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`_, Docker
 (https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
 projects use: the DCO (Developer Certificate of Origin:
 http://developercertificate.org/). This is a simple declaration that you wrote
MANIFEST.in

@@ -15,6 +15,7 @@ recursive-include docs *
 recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
+recursive-include tests *.pem
 recursive-include tests *.py
 
 recursive-include synapse/res *

@@ -37,6 +38,7 @@ prune docker
 prune .circleci
 prune .coveragerc
 prune debian
+prune .codecov.yml
 
 exclude jenkins*
 recursive-exclude jenkins *.sh
README.rst (356 lines changed)
@@ -4,15 +4,15 @@ Introduction
 ============
 
 Matrix is an ambitious new ecosystem for open federated Instant Messaging and
 VoIP. The basics you need to know to get up and running are:
 
 - Everything in Matrix happens in a room. Rooms are distributed and do not
   exist on any single server. Rooms can be located using convenience aliases
   like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
 
 - Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
   you will normally refer to yourself and others using a third party identifier
-  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
+  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs).
 
 The overall architecture is::
@@ -86,113 +86,222 @@ Synapse is the reference Python/Twisted Matrix homeserver implementation.
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5, 3.6, or 2.7
+- Python 3.5, 3.6, 3.7, or 2.7
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
-Installing from source
-----------------------
+The currently supported environment is [Ubuntu 18.04
+LTS](http://releases.ubuntu.com/18.04/).
 
-(Prebuilt packages are available for some platforms - see `Platform-Specific
-Instructions`_.)
+Recommended installation procedure
+----------------------------------
 
-Synapse is written in Python but some of the libraries it uses are written in
-C. So before we can install Synapse itself we need a working C compiler and the
-header files for Python C extensions.
+Building and running Synapse from source in a python3 environment is the
+recommended path for installation, as it is the most well-tested route.
+Binary packages are available for various platforms, but not officially
+supported by the Synapse team. See `Platform Specific Instructions`_ for
+details.
 
+Install prerequisites
+*********************
 
 Installing prerequisites on Ubuntu or Debian::
 
-    sudo apt-get install build-essential python3-dev libffi-dev \
-                         python-pip python-setuptools sqlite3 \
-                         libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
+    sudo apt-get update && sudo apt-get dist-upgrade
+    sudo apt-get install build-essential python3-dev python3-venv \
+                         python3-pip python-setuptools libssl-dev \
+                         libjpeg-dev libffi-dev zlib1g-dev \
+                         libxslt1-dev postgresql libwebp-dev libpq-dev
 
+**TODO: Update and check non-debian distro pre-req's for new process**
+
 Installing prerequisites on ArchLinux::
 
     sudo pacman -S base-devel python python-pip \
-                python-setuptools python-virtualenv sqlite3
+                python-setuptools python-virtualenv
 
-Installing prerequisites on CentOS 7 or Fedora 25::
+Installing prerequisites on CentOS 7 or Fedora::
 
     sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                      lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
                      python-virtualenv libffi-devel openssl-devel
     sudo yum groupinstall "Development Tools"
 
 Installing prerequisites on Mac OS X::
 
     xcode-select --install
     sudo easy_install pip
     sudo pip install virtualenv
     brew install pkg-config libffi
 
 Installing prerequisites on Raspbian::
 
-    sudo apt-get install build-essential python3-dev libffi-dev \
-                         python-pip python-setuptools sqlite3 \
-                         libssl-dev python-virtualenv libjpeg-dev
+    sudo apt-get update && sudo apt-get dist-upgrade
+    sudo apt-get install build-essential python3-dev python3-venv \
+                         python3-pip python-setuptools libssl-dev \
+                         libjpeg-dev libffi-dev zlib1g-dev \
+                         libxslt1-dev postgresql libwebp-dev libpq-dev
 
-Installing prerequisites on openSUSE::
-
-    sudo zypper in -t pattern devel_basis
-    sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
-           python-devel libffi-devel libopenssl-devel libjpeg62-devel
+Set up python environment
+*************************
 
-Installing prerequisites on OpenBSD::
+Add a new user for Synapse and log in as them::
 
-    doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
-                 libxslt jpeg
+    useradd matrix
+    su -l matrix
 
-To install the Synapse homeserver run::
+Create a python3 virtualenv and install dependencies::
 
-    mkdir -p ~/synapse
-    virtualenv -p python3 ~/synapse/env
-    source ~/synapse/env/bin/activate
-    pip install --upgrade pip
-    pip install --upgrade setuptools
-    pip install matrix-synapse
+    python3 -m venv matrix-synapse
+    ./matrix-synapse/bin/python -m pip install -U pip setuptools wheel
+    ./matrix-synapse/bin/python -m pip install -U matrix-synapse[all]
 
-This installs Synapse, along with the libraries it uses, into a virtual
-environment under ``~/synapse/env``. Feel free to pick a different directory
-if you prefer.
+Create a Synapse configuration directory. **Make sure you change
+``matrix.mydomain.com`` to your own domain**::
 
-This Synapse installation can then be later upgraded by using pip again with the
-update flag::
+    mkdir cfg
+    ./matrix-synapse/bin/python -m synapse.app.homeserver --generate-config \
+        -H matrix.mydomain.com \ # Change
+        -c cfg/homeserver.yaml \
+        --report-stats=yes
 
+Installing postgres
+*******************
+
+`PostgreSQL <https://www.postgresql.org/>`_ is the recommended database backend
+supported by Synapse. If you are upgrading from SQLite, please consult the
+`documentation on how to switch
+<https://github.com/matrix-org/synapse/blob/master/docs/postgres.rst#porting-from-sqlite>`_
+for improved performance.
+
+Enable and start postgresql::
+
+    systemctl enable postgresql && systemctl start postgresql
+
+Assuming your postgres user is called ``postgres``, login and create a user.
+This will prompt for a password, make sure you set a strong passphrase::
+
+    su - postgres
+    createuser --pwprompt synapse_user
+
+Create a Synapse database::
+
+    CREATE DATABASE synapse
+     ENCODING 'UTF8'
+     LC_COLLATE='C'
+     LC_CTYPE='C'
+     template=template0
+     OWNER synapse_user;
+
+Finally, edit the ``database`` section in your ``cfg/homeserver.yaml`` file
+to point to the new database::
+
+    database:
+        name: psycopg2
+        args:
+            user: synapse_user
+            password: <password defined in the createuser step>
+            database: synapse
+            host: localhost
+            cp_min: 5
+            cp_max: 10
+
+More information can be found at `Using Postgres with Synapse
+<docs/postgres.rst>`_.
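Before pointing Synapse at the new database, it is worth confirming that the role and database created above actually work. A sketch, assuming postgres listens on localhost and using the password chosen at the ``createuser`` step:

```bash
# Should prompt for synapse_user's password and print a single row with "1".
psql -h localhost -U synapse_user -d synapse -c 'SELECT 1;'
```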
+
+Systemd
+*******
+
+Running Synapse under `systemd <https://en.wikipedia.org/wiki/Systemd>`_ is
+recommended, as it allows for simple management and automatic restarts in case
+of a server error. To integrate Synapse with systemd, create a file at
+`/etc/systemd/system/synapse.service` with the following contents::
+
+    [Unit]
+    Description="Synapse homeserver"
+
+    [Service]
+    ExecStart=/home/matrix/matrix-synapse/bin/python -m synapse.app.homeserver
+    PIDFile=/home/matrix/matrix-synapse/homeserver.pid
+    Type=forking
+    WorkingDirectory=/home/matrix/matrix-synapse/
+    Restart=always
+
+Then tell systemd to update service file information::
+
+    sudo systemctl daemon-reload
+
+Synapse should now be enabled to run under Systemd, but **don't start Synapse
+yet!**
+
+
+ACME setup
+**********
+
+Synapse requires valid TLS certificates for communication between servers
+(port ``8448`` by default) in addition to those that are client-facing (port
+``443``). Synapse **will provision server-to-server certificates
+automatically for you for free** through `Let's Encrypt
+<https://letsencrypt.org/>`_ if you tell it to.
+
+Note: Synapse does not currently hot-renew Let's Encrypt certificates for
+you, it only checks for certificates that need renewing on restart. This
+functionality will be implemented promptly, but if in the meantime your
+federation certificates expire, simply restarting Synapse should renew
+them automatically.
+
+In order for Synapse to complete the ACME challenge to provision a
+certificate, it needs access to port 80. Typically listening on port 80 is
+only granted to applications running as root. There are thus two solutions to
+this problem.
+
+**Using a reverse proxy**
+
+A reverse proxy such as Apache or Nginx allows a single process (the web
+server) to listen on port 80 and redirect traffic to the appropriate program
+running on your server.
+
+
+**Authbind**
+
+``authbind`` allows a program which does not or should not run as root to
+bind to low-numbered ports in a controlled way. The setup is simpler, but
+requires a webserver not to already be running on port 80. **This includes
+every time Synapse renews a certificate**, which may be cumbersome if you
+usually run a web server on port 80. Nevertheless, if that isn't a concern,
+follow the instructions below.
+
+Install ``authbind``. This can be done on Ubuntu/Debian with::
+
+    sudo apt-get install authbind
+
+**Add authbind to the systemd script**
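The heading above leaves the systemd wiring unspecified. A hedged sketch of one conventional way to grant the ``matrix`` user port 80 via authbind (the ``byport`` file scheme is authbind's standard permission model, and the unit path matches the Systemd section above):

```bash
# Allow the matrix user to bind port 80.
sudo touch /etc/authbind/byport/80
sudo chown matrix /etc/authbind/byport/80
sudo chmod 755 /etc/authbind/byport/80
# Then wrap the daemon in authbind by editing the ExecStart line in
# /etc/systemd/system/synapse.service:
#   ExecStart=/usr/bin/authbind --deep /home/matrix/matrix-synapse/bin/python -m synapse.app.homeserver
```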
+
+**TODO: This right?** If you would like to use your own
+certificates, specifying them in Synapse's config file is sufficient.
+
+**TODO: Fit this in**
+These keys will allow your Home Server to identify itself to other Home
+Servers, so don't lose or delete them. It would be wise to back them up
+somewhere safe. (If, for whatever reason, you do need to change your Home
+Server's keys, you may find that other Home Servers have the old key cached.
+If you update the signing key, you should change the name of the key in the
+``<server name>.signing.key`` file (the second word) to something different.
+See `the spec`__ for more information on key management.)
+
+**TODO: Does this still work?** This Synapse installation can then be later
+upgraded by using pip again with the update flag::
+
     source ~/synapse/env/bin/activate
-    pip install -U matrix-synapse
+    pip install -U matrix-synapse[all]
 
 In case of problems, please see the _`Troubleshooting` section below.
 
-There is an offical synapse image available at
-https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
-the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
-this including configuration options is available in the README on
-hub.docker.com.
-
-Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
-Dockerfile to automate a synapse server in a single Docker image, at
-https://hub.docker.com/r/avhost/docker-matrix/tags/
-
-Slavi Pantaleev has created an Ansible playbook,
-which installs the offical Docker image of Matrix Synapse
-along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
-For more details, see
-https://github.com/spantaleev/matrix-docker-ansible-deploy
+We have now created a "matrix" user with its own home directory that stores
+Synapse's data and configuration files, backed by a postgres database, all
+packaged into a isolated python virtual environment.
 
 Configuring Synapse
 -------------------
 
-Before you can start Synapse, you will need to generate a configuration
-file. To do this, run (in your virtualenv, as before)::
-
-    cd ~/.synapse
-    python -m synapse.app.homeserver \
-        --server-name my.domain.name \
-        --config-path homeserver.yaml \
-        --generate-config \
-        --report-stats=[yes|no]
-
-... substituting an appropriate value for ``--server-name``. The server name
-determines the "domain" part of user-ids for users on your server: these will
+Before starting Synapse, inspect the ``cfg/homeserver.yaml`` file. ``server_name``
+determines the "domain" part of user-ids for users on your server, which will
 all be of the format ``@user:my.domain.name``. It also determines how other
 matrix servers will reach yours for `Federation`_. For a test configuration,
 set this to the hostname of your server. For a more production-ready setup, you
@@ -200,16 +309,7 @@ will probably want to specify your domain (``example.com``) rather than a
 matrix-specific hostname here (in the same way that your email address is
 probably ``user@example.com`` rather than ``user@email.example.com``) - but
 doing so may require more advanced setup - see `Setting up
-Federation`_. Beware that the server name cannot be changed later.
-
-This command will generate you a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your Home Server to
-identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your Home Server's keys, you may find that other Home Servers have the
-old key cached. If you update the signing key, you should change the name of the
-key in the ``<server name>.signing.key`` file (the second word) to something
-different. See `the spec`__ for more information on key management.)
+Federation`_. **Be aware that the server name cannot be changed later.**
 
 .. __: `key_management`_
@@ -217,10 +317,9 @@ The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
 configured without TLS; it should be behind a reverse proxy for TLS/SSL
 termination on port 443 which in turn should be used for clients. Port 8448
 is configured to use TLS with a self-signed certificate. If you would like
-to do initial test with a client without having to setup a reverse proxy,
-you can temporarly use another certificate. (Note that a self-signed
-certificate is fine for `Federation`_). You can do so by changing
-``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
+to do an initial test with a client without having to setup a reverse proxy,
+you can temporarly use another certificate. You can do so by changing
+``tls_certificate_path`` and ``tls_private_key_path``
 in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
 to read `Using a reverse proxy with Synapse`_ when doing so.
@@ -263,6 +362,8 @@ a TURN server. See `<docs/turn-howto.rst>`_ for details.
 Running Synapse
 ===============
 
+**TODO: Needs update**
+
 To actually run your new homeserver, pick a working directory for Synapse to
 run (e.g. ``~/synapse``), and::

@@ -270,6 +371,16 @@ run (e.g. ``~/synapse``), and::
     source env/bin/activate
     synctl start
 
+Upgrading an existing Synapse
+=============================
+
+The instructions for upgrading synapse are in `UPGRADE.rst`_.
+Please check these instructions as upgrading may require extra steps for some
+versions of synapse.
+
+.. _UPGRADE.rst: UPGRADE.rst
+
 
 Connecting to Synapse from a client
 ===================================
@@ -330,15 +441,20 @@ See https://github.com/vector-im/riot-web/issues/1977 and
 https://developer.github.com/changes/2014-04-25-user-content-security for more details.
 
 
-Platform-Specific Instructions
-==============================
+Platform-Specific Packages
+==========================
 
+Note that the only officially supported installation method is what is listed
+in `Synapse installation`_. Instructions and packages for other platforms are
+listed below, but beware that they may be outdated.
+
 Debian
 ------
 
 Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
 
 Note that these packages do not include a client - choose one from
-https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
+https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :).
 
 Fedora
 ------
@@ -390,7 +506,6 @@ Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy:
 
 - Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
 - Packages: ``pkg install py27-matrix-synapse``
 
-
 OpenBSD
 -------
@@ -424,12 +539,33 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-
 Windows Install
 ---------------
 
-If you wish to run or develop Synapse on Windows, the Windows Subsystem For
-Linux provides a Linux environment on Windows 10 which is capable of using the
-Debian, Fedora, or source installation methods. More information about WSL can
-be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
-Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
-for Windows Server.
+Running Synapse on Windows is not recommended or supported. However, if you
+wish to run Synapse on Windows, the Windows Subsystem For Linux provides a
+Linux environment on Windows 10 which is capable of using the Debian, Fedora,
+or source installation methods. More information about WSL can be found at
+https://docs.microsoft.com/en-us/windows/wsl/install-win10 for Windows 10 and
+https://docs.microsoft.com/en-us/windows/wsl/install-on-server for Windows
+Server.
+
+
+Alternative installation methods
+================================
+
+There is an offical synapse image available at
+https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
+the docker-compose file available at `contrib/docker <contrib/docker>`_.
+Further information on this including configuration options is available in
+the README on hub.docker.com.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+https://hub.docker.com/r/avhost/docker-matrix/tags/
+
+Slavi Pantaleev has created an Ansible playbook, which installs the offical
+Docker image of Matrix Synapse along with many other Matrix-related services
+(Postgres database, riot-web, coturn, mxisd, SSL support, etc.). For more
+details, see https://github.com/spantaleev/matrix-docker-ansible-deploy
 
 
 Troubleshooting
 ===============
@@ -473,7 +609,7 @@ failing, e.g.::
 
     pip install twisted
 
 Running out of File Handles
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+***************************
 
 If synapse runs out of filehandles, it typically fails badly - live-locking
 at 100% CPU, and/or failing to accept new TCP connections (blocking the
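A common mitigation for the failure mode described above is simply raising the file descriptor limit for the Synapse process. A sketch; the limit value is arbitrary, and the unit name follows the systemd example earlier in this README:

```bash
# One-off, in the shell that launches Synapse:
ulimit -n 4096
# Or persistently for a systemd-managed Synapse, add under [Service]:
#   LimitNOFILE=4096
sudo systemctl daemon-reload && sudo systemctl restart synapse
```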
@@ -496,16 +632,6 @@ log lines and looking for any 'Processed request' lines which take more than
 a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
 you see this failure mode so we can help debug it, however.
 
-
-Upgrading an existing Synapse
-=============================
-
-The instructions for upgrading synapse are in `UPGRADE.rst`_.
-Please check these instructions as upgrading may require extra steps for some
-versions of synapse.
-
-.. _UPGRADE.rst: UPGRADE.rst
-
 .. _federation:
 
 Setting up Federation
@@ -725,8 +851,8 @@ caveats, you will need to do the following:
 tell other servers how to find you. See `Setting up Federation`_.
 
 When updating the SSL certificate, just update the file pointed to by
-``tls_certificate_path``: there is no need to restart synapse. (You may like to
-use a symbolic link to help make this process atomic.)
+``tls_certificate_path`` and then restart Synapse. (You may like to use a symbolic link
+to help make this process atomic.)
 
 The most common mistake when setting up federation is not to tell Synapse about
 your SSL certificate. To check it, you can visit
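One command-line way to check how other servers see your certificate is the public federation tester; the exact API path here is an assumption about that service, and ``example.com`` is a placeholder:

```bash
# Ask the federation tester for a report on your server name.
curl -s "https://matrix.org/federationtester/api/report?server_name=example.com"
```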
@@ -796,8 +922,7 @@ A manual password reset can be done via direct database access as follows.
 
 First calculate the hash of the new password::
 
-    $ source ~/.synapse/bin/activate
-    $ ./scripts/hash_password
+    $ ~/synapse/env/bin/hash_password
     Password:
     Confirm password:
     $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
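To finish the manual reset sketched above, the resulting hash is written into the ``users`` table (``users``, ``password_hash`` and ``name`` match Synapse's schema; the hash and user ID below are placeholders). A sketch for a SQLite-backed homeserver; note the ``$`` characters must be escaped from the shell:

```bash
sqlite3 homeserver.db \
  "UPDATE users SET password_hash='\$2a\$12\$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
   WHERE name='@alice:my.domain.name';"
```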
@@ -826,8 +951,7 @@ to install using pip and a virtualenv::
 
     virtualenv -p python2.7 env
     source env/bin/activate
-    python -m synapse.python_dependencies | xargs pip install
-    pip install lxml mock
+    python -m pip install -e .[all]
 
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.

@@ -835,7 +959,7 @@ dependencies into a virtual env.
 Once this is done, you may wish to run Synapse's unit tests, to
 check that everything is installed as it should be::
 
-    PYTHONPATH="." trial tests
+    python -m twisted.trial tests
 
 This should end with a 'PASSED' result::
 
UPGRADE.rst (34 lines changed)
@@ -18,7 +18,7 @@ instructions that may be required are listed later in this document.
 
 .. code:: bash
 
-    pip install --upgrade --process-dependency-links matrix-synapse
+    pip install --upgrade matrix-synapse
 
     # restart synapse
     synctl restart
@@ -48,6 +48,38 @@ returned by the Client-Server API:
 
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v0.99.0
+====================
+
+In preparation for Synapse v1.0, you must update your TLS certificates from
+self-signed ones to verifiable ones signed by a trusted root CA.
+
+If you do not already have a certificate for your domain, the easiest way to get
+one is with Synapse's new ACME support, which will use the ACME protocol to
+provision a certificate automatically. By default, certificates will be obtained
+from the publicly trusted CA Let's Encrypt.
+
+For a sample configuration, please inspect the new ACME section in the example
+generated config by running the ``generate-config`` executable. For example::
+
+    ~/synapse/env3/bin/generate-config
+
+You will need to provide Let's Encrypt (or other ACME provider) access to your
+Synapse ACME challenge responder on port 80, at the domain of your homeserver.
+This requires you either change the port of the ACME listener provided by
+Synapse to a high port and reverse proxy to it, or use a tool like authbind to
+allow Synapse to listen on port 80 without root access. (Do not run Synapse with
+root permissions!)
+
+You will need to back up or delete your self signed TLS certificate
+(``example.com.tls.crt`` and ``example.com.tls.key``), Synapse's ACME
+implementation will not overwrite them.
+
+You may wish to use alternate methods such as Certbot to obtain a certificate
+from Let's Encrypt, depending on your server configuration. Of course, if you
+already have a valid certificate for your homeserver's domain, that can be
+placed in Synapse's config directory without the need for ACME.
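An alternative to authbind for the port 80 requirement above is a NAT redirect to a high ACME port. A hedged sketch, assuming the ACME listener has been configured on port 8009 in homeserver.yaml (an illustrative choice, not a documented default):

```bash
# Redirect inbound traffic on port 80 to the unprivileged ACME listener.
sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8009
```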
 
 Upgrading to v0.34.0
 ====================
changelog.d/3902.feature (new file, 1 line)

@@ -0,0 +1 @@
+Include m.room.encryption on invites by default
contrib/docker/docker-compose.yml

@@ -37,7 +37,7 @@ services:
     labels:
       - traefik.enable=true
       - traefik.frontend.rule=Host:my.matrix.Host
-      - traefik.port=8448
+      - traefik.port=8008
 
   db:
     image: docker.io/postgres:10-alpine
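With traefik now forwarding to Synapse's plain HTTP client port, a quick smoke test is the versions endpoint. A sketch assuming the container's port 8008 is reachable on localhost:

```bash
# Should return a JSON object listing supported client-server API versions.
curl -s http://localhost:8008/_matrix/client/versions
```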
contrib/systemd/matrix-synapse.service (new file, 31 lines)

@@ -0,0 +1,31 @@
+# Example systemd configuration file for synapse. Copy into
+# /etc/systemd/system/, update the paths if necessary, then:
+#
+#    systemctl enable matrix-synapse
+#    systemctl start matrix-synapse
+#
+# This assumes that Synapse has been installed in a virtualenv in
+# /opt/synapse/env.
+#
+# **NOTE:** This is an example service file that may change in the future. If you
+# wish to use this please copy rather than symlink it.
+
+[Unit]
+Description=Synapse Matrix homeserver
+
+[Service]
+Type=simple
+Restart=on-abort
+
+User=synapse
+Group=nogroup
+
+WorkingDirectory=/opt/synapse
+ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
+
+# adjust the cache factor if necessary
+#   Environment=SYNAPSE_CACHE_FACTOR=2.0
+
+[Install]
+WantedBy=multi-user.target
@@ -1,22 +0,0 @@
-# This assumes that Synapse has been installed as a system package
-# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
-# rather than in a user home directory or similar under virtualenv.
-
-# **NOTE:** This is an example service file that may change in the future. If you
-# wish to use this please copy rather than symlink it.
-
-[Unit]
-Description=Synapse Matrix homeserver
-
-[Service]
-Type=simple
-User=synapse
-Group=synapse
-WorkingDirectory=/var/lib/synapse
-ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
-ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
-# EnvironmentFile=-/etc/sysconfig/synapse  # Can be used to e.g. set SYNAPSE_CACHE_FACTOR
-
-[Install]
-WantedBy=multi-user.target
53 debian/build_virtualenv vendored
@@ -6,7 +6,16 @@
set -e

export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
SNAKE=/usr/bin/python3

# make sure that the virtualenv links to the specific version of python, by
# dereferencing the python3 symlink.
#
# Otherwise, if somebody tries to install (say) the stretch package on buster,
# they will get a confusing error about "No module named 'synapse'", because
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=`readlink -e /usr/bin/python3`

# try to set the CFLAGS so any compiled C extensions are compiled with the most
# generic as possible x64 instructions, so that compiling it on a new Intel chip
@@ -33,7 +42,12 @@ dh_virtualenv \
        --preinstall="lxml" \
        --preinstall="mock" \
        --extra-pip-arg="--no-cache-dir" \
        --extra-pip-arg="--compile"
        --extra-pip-arg="--compile" \
        --extras="all"

PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"

# we copy the tests to a temporary directory so that we can put them on the
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
@@ -41,8 +55,37 @@ tmpdir=`mktemp -d`
trap "rm -r $tmpdir" EXIT

cp -r tests "$tmpdir"
cd debian/matrix-synapse-py3

PYTHONPATH="$tmpdir" \
    ./opt/venvs/matrix-synapse/bin/python \
    -B -m twisted.trial --reporter=text -j2 tests
    "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests

# build the config file
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
        --config-dir="/etc/matrix-synapse" \
        --data-dir="/var/lib/matrix-synapse" |
    perl -pe '
    # tweak the paths to the tls certs and signing keys
    /^tls_.*_path:/ and s/SERVERNAME/homeserver/;
    /^signing_key_path:/ and s/SERVERNAME/homeserver/;

    # tweak the pid file location
    /^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;

    # tweak the path to the log config
    /^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;

    # tweak the path to the media store
    /^media_store_path:/ and s#/media_store#/media#;

    # remove the server_name setting, which is set in a separate file
    /^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";

    # remove the report_stats setting, which is set in a separate file
    /^# report_stats:/ and $_ = "";

    ' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"


# add a dependency on the right version of python to substvars.
PYPKG=`basename $SNAKE`
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars

28 debian/changelog vendored
@@ -1,3 +1,31 @@
matrix-synapse-py3 (0.34.1.1++1) stable; urgency=medium

  * Update conflicts specifications to allow smoother transition from matrix-synapse.

 -- Synapse Packaging team <packages@matrix.org>  Sat, 12 Jan 2019 12:58:35 +0000

matrix-synapse-py3 (0.34.1.1) stable; urgency=high

  * New synapse release 0.34.1.1

 -- Synapse Packaging team <packages@matrix.org>  Thu, 10 Jan 2019 15:04:52 +0000

matrix-synapse-py3 (0.34.1+1) stable; urgency=medium

  * Remove 'Breaks: matrix-synapse-ldap3'. (matrix-synapse-py3 includes
    the matrix-synapse-ldap3 python files, which makes the
    matrix-synapse-ldap3 debian package redundant but not broken.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 09 Jan 2019 15:30:00 +0000

matrix-synapse-py3 (0.34.1) stable; urgency=medium

  * New synapse release 0.34.1.
  * Update Conflicts specifications to allow installation alongside our
    matrix-synapse transitional package.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 09 Jan 2019 14:52:24 +0000

matrix-synapse-py3 (0.34.0) stable; urgency=medium

  * New synapse release 0.34.0.

11 debian/control vendored
@@ -5,7 +5,7 @@ Maintainer: Synapse Packaging team <packages@matrix.org>
Build-Depends:
 debhelper (>= 9),
 dh-systemd,
 dh-virtualenv (>= 1.0),
 dh-virtualenv (>= 1.1),
 lsb-release,
 python3-dev,
 python3,
@@ -13,19 +13,22 @@ Build-Depends:
 python3-pip,
 python3-venv,
 tar,
Standards-Version: 3.9.5
Standards-Version: 3.9.8
Homepage: https://github.com/matrix-org/synapse

Package: matrix-synapse-py3
Architecture: amd64
Conflicts: matrix-synapse
Provides: matrix-synapse
Conflicts:
 matrix-synapse (<< 0.34.0.1-0matrix2),
 matrix-synapse (>= 0.34.0.1-1),
Pre-Depends: dpkg (>= 1.16.1)
Depends:
 adduser,
 debconf,
 python3-distutils|libpython3-stdlib (<< 3.6),
 python3,
 ${misc:Depends},
 ${synapse:pydepends},
# some of our scripts use perl, but none of them are important,
# so we put perl:Depends in Suggests rather than Depends.
Suggests:

617 debian/homeserver.yaml vendored
@@ -1,617 +0,0 @@
# vim:ft=yaml
# PEM encoded X509 certificate for TLS.
# You can replace the self-signed certificate that synapse
# autogenerates on launch with your own SSL certificate + key pair
# if you like. Any required intermediary certificates can be
# appended after the primary certificate in hierarchical order.
tls_certificate_path: "/etc/matrix-synapse/homeserver.tls.crt"

# PEM encoded private key for TLS
tls_private_key_path: "/etc/matrix-synapse/homeserver.tls.key"

# PEM dh parameters for ephemeral keys
tls_dh_params_path: "/etc/matrix-synapse/homeserver.tls.dh"

# Don't bind to the https port
no_tls: False

# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the loadbalancers to this list if they are different to the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# key. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
tls_fingerprints: []
# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]


## Server ##

# When running as a daemon, the file to store the pid in
pid_file: "/var/run/matrix-synapse.pid"

# CPU affinity mask. Setting this restricts the CPUs on which the
# process will be scheduled. It is represented as a bitmask, with the
# lowest order bit corresponding to the first logical CPU and the
# highest order bit corresponding to the last logical CPU. Not all CPUs
# may exist on a given system but a mask may specify more CPUs than are
# present.
#
# For example:
# 0x00000001 is processor #0,
# 0x00000003 is processors #0 and #1,
# 0xFFFFFFFF is all processors (#0 through #31).
#
# Pinning a Python process to a single CPU is desirable, because Python
# is inherently single-threaded due to the GIL, and can suffer a
# 30-40% slowdown due to cache blow-out and thread context switching
# if the scheduler happens to schedule the underlying threads across
# different cores. See
# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
#
# cpu_affinity: 0xFFFFFFFF

# The path to the web client which will be served at /_matrix/client/
# if 'webclient' is configured under the 'listeners' configuration.
#
# web_client_location: "/path/to/web/root"

# The public-facing base URL for the client API (not including _matrix/...)
# public_baseurl: https://example.com:8448/

# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
soft_file_limit: 0

# The GC threshold parameters to pass to `gc.set_threshold`, if defined
# gc_thresholds: [700, 10, 10]

# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is -1, means no upper limit.
# filter_timeline_limit: 5000

# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
# block_non_admin_invites: True

# Restrict federation to the following whitelist of domains.
# N.B. we recommend also firewalling your federation listener to limit
# inbound federation traffic as early as possible, rather than relying
# purely on this application-layer restriction. If not specified, the
# default is to whitelist everything.
#
# federation_domain_whitelist:
#  - lon.example.com
#  - nyc.example.com
#  - syd.example.com

# List of ports that Synapse should listen on, their purpose and their
# configuration.
listeners:
  # Main HTTPS listener
  # For when matrix traffic is sent directly to synapse.
  -
    # The port to listen for HTTPS requests on.
    port: 8448

    # Local addresses to listen on.
    # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
    # addresses by default. For most other OSes, this will only listen
    # on IPv6.
    bind_addresses:
      - '::'
      - '0.0.0.0'

    # This is a 'http' listener, allows us to specify 'resources'.
    type: http

    tls: true

    # Use the X-Forwarded-For (XFF) header as the client IP and not the
    # actual client IP.
    x_forwarded: false

    # List of HTTP resources to serve on this listener.
    resources:
      -
        # List of resources to host on this listener.
        names:
          - client # The client-server APIs, both v1 and v2
          - webclient # The bundled webclient.

        # Should synapse compress HTTP responses to clients that support it?
        # This should be disabled if running synapse behind a load balancer
        # that can do automatic compression.
        compress: true

      - names: [federation] # Federation APIs
        compress: false

    # optional list of additional endpoints which can be loaded via
    # dynamic modules
    # additional_resources:
    #   "/_matrix/my/custom/endpoint":
    #     module: my_module.CustomRequestHandler
    #     config: {}

  # Unsecure HTTP listener,
  # For when matrix traffic passes through loadbalancer that unwraps TLS.
  - port: 8008
    tls: false
    bind_addresses: ['::', '0.0.0.0']
    type: http

    x_forwarded: false

    resources:
      - names: [client, webclient]
        compress: true
      - names: [federation]
        compress: false

  # Turn on the twisted ssh manhole service on localhost on the given
  # port.
  # - port: 9000
  #   bind_addresses: ['::1', '127.0.0.1']
  #   type: manhole


# Database configuration
database:
  # The database engine name
  name: "sqlite3"
  # Arguments to pass to the engine
  args:
    # Path to the database
    database: "/var/lib/matrix-synapse/homeserver.db"

# Number of events to cache in memory.
event_cache_size: "10K"


# A yaml python logging config file
log_config: "/etc/matrix-synapse/log.yaml"


## Ratelimiting ##

# Number of messages a client can send per second
rc_messages_per_second: 0.2

# Number of message a client can send before being throttled
rc_message_burst_count: 10.0

# The federation window size in milliseconds
federation_rc_window_size: 1000

# The number of federation requests from a single server in a window
# before the server will delay processing the request.
federation_rc_sleep_limit: 10

# The duration in milliseconds to delay processing events from
# remote servers by if they go over the sleep limit.
federation_rc_sleep_delay: 500

# The maximum number of concurrent federation requests allowed
# from a single server
federation_rc_reject_limit: 50

# The number of federation requests to concurrently process from a
# single server
federation_rc_concurrent: 3


# Directory where uploaded images and attachments are stored.
media_store_path: "/var/lib/matrix-synapse/media"

# Media storage providers allow media to be stored in different
# locations.
# media_storage_providers:
#  - module: file_system
#    # Whether to write new local files.
#    store_local: false
#    # Whether to write new remote media
#    store_remote: false
#    # Whether to block upload requests waiting for write to this
#    # provider to complete
#    store_synchronous: false
#    config:
#      directory: /mnt/some/other/directory

# Directory where in-progress uploads are stored.
uploads_path: "/var/lib/matrix-synapse/uploads"

# The largest allowed upload size in bytes
max_upload_size: "10M"

# Maximum number of pixels that will be thumbnailed
max_image_pixels: "32M"

# Whether to generate new thumbnails on the fly to precisely match
# the resolution requested by the client. If true then whenever
# a new resolution is requested by the client the server will
# generate a new thumbnail. If false the server will pick a thumbnail
# from a precalculated list.
dynamic_thumbnails: false

# List of thumbnail to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale

# Is the preview URL API enabled? If enabled, you *must* specify
# an explicit url_preview_ip_range_blacklist of IPs that the spider is
# denied from accessing.
url_preview_enabled: False

# List of IP address CIDR ranges that the URL preview spider is denied
# from accessing. There are no defaults: you must explicitly
# specify a list for URL previewing to work. You should specify any
# internal services in your network that you do not want synapse to try
# to connect to, otherwise anyone in any Matrix room could cause your
# synapse to issue arbitrary GET requests to your internal services,
# causing serious security issues.
#
# url_preview_ip_range_blacklist:
# - '127.0.0.0/8'
# - '10.0.0.0/8'
# - '172.16.0.0/12'
# - '192.168.0.0/16'
# - '100.64.0.0/10'
# - '169.254.0.0/16'
#
# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.
# This is useful for specifying exceptions to wide-ranging blacklisted
# target IP ranges - e.g. for enabling URL previews for a specific private
# website only visible in your network.
#
# url_preview_ip_range_whitelist:
# - '192.168.1.1'

# Optional list of URL matches that the URL preview spider is
# denied from accessing. You should use url_preview_ip_range_blacklist
# in preference to this, otherwise someone could define a public DNS
# entry that points to a private IP address and circumvent the blacklist.
# This is more useful if you know there is an entire shape of URL that
# you know that will never want synapse to try to spider.
#
# Each list entry is a dictionary of url component attributes as returned
# by urlparse.urlsplit as applied to the absolute form of the URL. See
# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
# The values of the dictionary are treated as an filename match pattern
# applied to that component of URLs, unless they start with a ^ in which
# case they are treated as a regular expression match. If all the
# specified component matches for a given list item succeed, the URL is
# blacklisted.
#
# url_preview_url_blacklist:
# # blacklist any URL with a username in its URI
# - username: '*'
#
# # blacklist all *.google.com URLs
# - netloc: 'google.com'
# - netloc: '*.google.com'
#
# # blacklist all plain HTTP URLs
# - scheme: 'http'
#
# # blacklist http(s)://www.acme.com/foo
# - netloc: 'www.acme.com'
#   path: '/foo'
#
# # blacklist any URL with a literal IPv4 address
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

# The largest allowed URL preview spidering size in bytes
max_spider_size: "10M"


## Captcha ##
# See docs/CAPTCHA_SETUP for full details of configuring this.

# This Home Server's ReCAPTCHA public key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"

# This Home Server's ReCAPTCHA private key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"

# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha
# public/private key.
enable_registration_captcha: False

# A secret key used to bypass the captcha test entirely.
#captcha_bypass_secret: "YOUR_SECRET_HERE"

# The API endpoint to use for verifying m.login.recaptcha responses.
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"


## Turn ##

# The public URIs of the TURN server to give to clients
turn_uris: []

# The shared secret used to compute passwords for the TURN server
turn_shared_secret: "YOUR_SHARED_SECRET"

# The Username and password if the TURN server needs them and
# does not use a token
#turn_username: "TURNSERVER_USERNAME"
#turn_password: "TURNSERVER_PASSWORD"

# How long generated TURN credentials last
turn_user_lifetime: "1h"

# Whether guests should be allowed to use the TURN server.
# This defaults to True, otherwise VoIP will be unreliable for guests.
# However, it does introduce a slight security risk as it allows users to
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
turn_allow_guests: False


## Registration ##

# Enable registration for new users.
enable_registration: False

# The user must provide all of the below types of 3PID when registering.
#
# registrations_require_3pid:
#     - email
#     - msisdn

# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# allowed_local_3pids:
#     - medium: email
#       pattern: ".*@matrix\.org"
#     - medium: email
#       pattern: ".*@vector\.im"
#     - medium: msisdn
#       pattern: "\+44"

# If set, allows registration by anyone who also has the shared
# secret, even if registration is otherwise disabled.
# registration_shared_secret: <PRIVATE STRING>

# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number is 12 (which equates to 2^12 rounds).
# N.B. that increasing this will exponentially increase the time required
# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
bcrypt_rounds: 12

# Allows users to register as guests without a password/email/etc, and
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
allow_guest_access: False

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
    - matrix.org
    - vector.im
    - riot.im

# Users who register on this homeserver will automatically be joined
# to these rooms
#auto_join_rooms:
#    - "#example:example.com"


## Metrics ###

# Enable collection and rendering of performance metrics
enable_metrics: False

## API Configuration ##

# A list of event types that will be included in the room_invite_state
room_invite_state_types:
    - "m.room.join_rules"
    - "m.room.canonical_alias"
    - "m.room.avatar"
    - "m.room.name"


# A list of application service config file to use
app_service_config_files: []


# macaroon_secret_key: <PRIVATE STRING>

# Used to enable access token expiration.
expire_access_token: False

## Signing Keys ##

# Path to the signing key to sign messages with
signing_key_path: "/etc/matrix-synapse/homeserver.signing.key"

# The keys that the server used to sign messages with but won't use
# to sign new messages. E.g. it has lost its private key
old_signing_keys: {}
#  "ed25519:auto":
#    # Base64 encoded public key
#    key: "The public part of your old signing key."
#    # Millisecond POSIX timestamp when the key expired.
#    expired_ts: 123456789123

# How long key response published by this server is valid for.
# Used to set the valid_until_ts in /key/v2 APIs.
# Determines how quickly servers will query to check which keys
# are still valid.
key_refresh_interval: "1d" # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"


# Enable SAML2 for registration and login. Uses pysaml2
# config_path: Path to the sp_conf.py configuration file
# idp_redirect_url: Identity provider URL which will redirect
# the user back to /login/saml2 with proper info.
# See pysaml2 docs for format of config.
#saml2_config:
#   enabled: true
#   config_path: "/home/erikj/git/synapse/sp_conf.py"
#   idp_redirect_url: "http://test/idp"


# Enable CAS for registration and login.
#cas_config:
#   enabled: true
#   server_url: "https://cas-server.com"
#   service_url: "https://homeserver.domain.com:8448"
#   #required_attributes:
#   #    name: value


# The JWT needs to contain a globally unique "sub" (subject) claim.
#
# jwt_config:
#    enabled: true
#    secret: "a secret"
#    algorithm: "HS256"


# Enable password for login.
password_config:
   enabled: true
   # Uncomment and change to a secret random string for extra security.
   # DO NOT CHANGE THIS AFTER INITIAL SETUP!
   #pepper: ""


# Enable sending emails for notification events
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
#
#email:
#   enable_notifs: false
#   smtp_host: "localhost"
#   smtp_port: 25
#   smtp_user: "exampleusername"
#   smtp_pass: "examplepassword"
#   require_transport_security: False
#   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
#   app_name: Matrix
#   template_dir: res/templates
#   notif_template_html: notif_mail.html
#   notif_template_text: notif_mail.txt
#   notif_for_new_users: True
#   riot_base_url: "http://localhost/riot"


# password_providers:
#     - module: "ldap_auth_provider.LdapAuthProvider"
#       config:
#         enabled: true
#         uri: "ldap://ldap.example.com:389"
#         start_tls: true
#         base: "ou=users,dc=example,dc=com"
#         attributes:
#            uid: "cn"
#            mail: "email"
#            name: "givenName"
#         #bind_dn:
#         #bind_password:
#         #filter: "(objectClass=posixAccount)"


# Clients requesting push notifications can either have the body of
# the message sent in the notification poke along with other details
# like the sender, or just the event ID and room ID (`event_id_only`).
# If clients choose the former, this option controls whether the
# notification request includes the content of the event (other details
# like the sender are still included). For `event_id_only` push, it
# has no effect.

# For modern android devices the notification content will still appear
# because it is loaded by the app. iPhone, however will send a
# notification saying only that a message arrived and who it came from.
#
#push:
#   include_content: true


# spam_checker:
#     module: "my_custom_project.SuperSpamChecker"
#     config:
#         example_option: 'things'


# Whether to allow non server admins to create groups on this server
enable_group_creation: false

# If enabled, non server admins can only create groups with local parts
# starting with this prefix
# group_creation_prefix: "unofficial/"


# User Directory configuration
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to run
# UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
#   search_all_users: false

1 debian/install vendored
@@ -1,2 +1 @@
debian/homeserver.yaml etc/matrix-synapse
debian/log.yaml etc/matrix-synapse

@@ -1,9 +0,0 @@
2048-bit DH parameters taken from rfc3526
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA///////////JD9qiIWjCNMTGYouA3BzRKQJOCIpnzHQCC76mOxOb
IlFKCHmONATd75UZs806QxswKwpt8l8UN0/hNW1tUcJF5IW1dmJefsb0TELppjft
awv/XLb0Brft7jhr+1qJn6WunyQRfEsf5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT
mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVSu57VKQdwlpZtZww1Tkq8mATxdGwIyhgh
fDKQXkYuNs474553LBgOhgObJ4Oi7Aeij7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq
5RXSJhiY+gUQFXKOWoqsqmj//////////wIBAg==
-----END DH PARAMETERS-----

@@ -33,9 +33,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \

COPY . /synapse
RUN pip install --prefix="/install" --no-warn-script-location \
    lxml \
    psycopg2 \
    /synapse
    /synapse[all]

###
### Stage 1: runtime

@@ -11,6 +11,35 @@

# Get the distro we want to pull from as a dynamic build variable
ARG distro=""

###
### Stage 0: build a dh-virtualenv
###
FROM ${distro} as builder

RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
        -yqq --no-install-recommends \
        build-essential \
        ca-certificates \
        devscripts \
        equivs \
        wget

# fetch and unpack the package
RUN wget -q -O /dh-virtuenv-1.1.tar.gz https://github.com/spotify/dh-virtualenv/archive/1.1.tar.gz
RUN tar xvf /dh-virtuenv-1.1.tar.gz

# install its build deps
RUN cd dh-virtualenv-1.1/ \
    && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -yqq --no-install-recommends"

# build it
RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b

###
### Stage 1
###
FROM ${distro}

# Install the build dependencies
@@ -21,15 +50,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
        debhelper \
        devscripts \
        dh-systemd \
        dh-virtualenv \
        equivs \
        lsb-release \
        python3-dev \
        python3-pip \
        python3-setuptools \
        python3-venv \
        sqlite3 \
        wget
        sqlite3

COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
RUN apt-get install -yq /dh-virtualenv_1.1-1_all.deb

WORKDIR /synapse/source
ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]

@@ -6,20 +6,6 @@ set -ex

DIST=`lsb_release -c -s`

# We need to build a newer dh_virtualenv on older OSes like Xenial.
if [ "$DIST" = 'xenial' ]; then
    mkdir -p /tmp/dhvenv
    cd /tmp/dhvenv
    wget https://github.com/spotify/dh-virtualenv/archive/1.1.tar.gz
    tar xvf 1.1.tar.gz
    cd dh-virtualenv-1.1/
    env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io"
    dpkg-buildpackage -us -uc -b
    cd /tmp/dhvenv
    apt-get install -yqq ./dh-virtualenv_1.1-1_all.deb
fi


# we get a read-only copy of the source: make a writeable copy
cp -aT /synapse/source /synapse/build
cd /synapse/build

@@ -1,46 +0,0 @@
#!/bin/bash

# Build the Debian packages using Docker images.
#
# This script builds the Docker images and then executes them sequentially, each
# one building a Debian package for the targeted operating system. It is
# designed to be a "single command" to produce all the images.
#
# By default, builds for all known distributions, but a list of distributions
# can be passed on the commandline for debugging.

set -ex

cd `dirname $0`

if [ $# -lt 1 ]; then
    DISTS=(
        debian:stretch
        debian:buster
        debian:sid
        ubuntu:xenial
        ubuntu:bionic
        ubuntu:cosmic
    )
else
    DISTS=("$@")
fi

# Make the dir where the debs will live.
#
# Note that we deliberately put this outside the source tree, otherwise we tend
# to get source packages which are full of debs. (We could hack around that
# with more magic in the build_debian.sh script, but that doesn't solve the
# problem for natively-run dpkg-buildpakage).

mkdir -p ../../debs

# Build each OS image;
for i in "${DISTS[@]}"; do
    TAG=$(echo ${i} | cut -d ":" -f 2)
    docker build --tag dh-venv-builder:${TAG} --build-arg distro=${i} -f Dockerfile-dhvirtualenv .
    docker run -it --rm --volume=$(pwd)/../\:/synapse/source:ro --volume=$(pwd)/../../debs:/debs \
        -e TARGET_USERID=$(id -u) \
        -e TARGET_GROUPID=$(id -g) \
        dh-venv-builder:${TAG}
done

@@ -4,7 +4,6 @@

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

@@ -39,13 +39,13 @@ As an example::
  }

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs. For an example of generation in Python::

  import hmac, hashlib

  def generate_mac(nonce, user, password, admin=False):
  def generate_mac(nonce, user, password, admin=False, user_type=None):

      mac = hmac.new(
        key=shared_secret,
@@ -59,5 +59,8 @@ generation in Python::
      mac.update(password.encode('utf8'))
      mac.update(b"\x00")
      mac.update(b"admin" if admin else b"notadmin")
      if user_type:
          mac.update(b"\x00")
          mac.update(user_type.encode('utf8'))

      return mac.hexdigest()
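
As a rough usage sketch (the shared secret and nonce here are invented
placeholders; ``"support"`` is the user type defined in
``synapse.api.constants.UserTypes``)::

  shared_secret = "your_registration_shared_secret"
  mac = generate_mac("a1b2c3", "alice", "hunter2", admin=False, user_type="support")
  # `mac` is then sent as the "mac" field of the registration request body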

@@ -40,7 +40,6 @@ You may be able to setup coturn via your package manager, or set it up manually
 4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
    lines, with example values, are::

        lt-cred-mech
        use-auth-secret
        static-auth-secret=[your secret key here]
        realm=turn.myserver.org
@@ -52,7 +51,7 @@ You may be able to setup coturn via your package manager, or set it up manually

 5. Consider your security settings. TURN lets users request a relay
    which will connect to arbitrary IP addresses and ports. At the least
    we recommend:
    we recommend::

        # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
        no-tcp-relay
@@ -106,7 +105,7 @@ Your home server configuration file needs the following extra keys:
    to refresh credentials. The TURN REST API specification recommends
    one day (86400000).

 4. "turn_allow_guests": Whether to allow guest users to use the TURN
 4. "turn_allow_guests": Whether to allow guest users to use the TURN
    server. This is enabled by default, as otherwise VoIP will not
    work reliably for guests. However, it does introduce a security risk
    as it lets guests connect to arbitrary endpoints without having gone

154 scripts-dev/build_debian_packages Executable file
@@ -0,0 +1,154 @@
#!/usr/bin/env python3

# Build the Debian packages using Docker images.
#
# This script builds the Docker images and then executes them sequentially, each
# one building a Debian package for the targeted operating system. It is
# designed to be a "single command" to produce all the images.
#
# By default, builds for all known distributions, but a list of distributions
# can be passed on the commandline for debugging.

import argparse
import os
import signal
import subprocess
import sys
import threading
from concurrent.futures import ThreadPoolExecutor

DISTS = (
    "debian:stretch",
    "debian:buster",
    "debian:sid",
    "ubuntu:xenial",
    "ubuntu:bionic",
    "ubuntu:cosmic",
)

DESC = '''\
Builds .debs for synapse, using a Docker image for the build environment.

By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
'''


class Builder(object):
    def __init__(self, redirect_stdout=False):
        self.redirect_stdout = redirect_stdout
        self.active_containers = set()
        self._lock = threading.Lock()
        self._failed = False

    def run_build(self, dist):
        """Build deb for a single distribution"""

        if self._failed:
            print("not building %s due to earlier failure" % (dist, ))
            raise Exception("failed")

        try:
            self._inner_build(dist)
        except Exception as e:
            print("build of %s failed: %s" % (dist, e), file=sys.stderr)
            self._failed = True
            raise

    def _inner_build(self, dist):
        projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        os.chdir(projdir)

        tag = dist.split(":", 1)[1]

        # Make the dir where the debs will live.
        #
        # Note that we deliberately put this outside the source tree, otherwise
        # we tend to get source packages which are full of debs. (We could hack
        # around that with more magic in the build_debian.sh script, but that
        # doesn't solve the problem for natively-run dpkg-buildpakage).
        debsdir = os.path.join(projdir, '../debs')
        os.makedirs(debsdir, exist_ok=True)

        if self.redirect_stdout:
            logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
            print("building %s: directing output to %s" % (dist, logfile))
            stdout = open(logfile, "w")
        else:
            stdout = None

        # first build a docker image for the build environment
        subprocess.check_call([
            "docker", "build",
            "--tag", "dh-venv-builder:" + tag,
            "--build-arg", "distro=" + dist,
            "-f", "docker/Dockerfile-dhvirtualenv",
            "docker",
        ], stdout=stdout, stderr=subprocess.STDOUT)

        container_name = "synapse_build_" + tag
        with self._lock:
            self.active_containers.add(container_name)

        # then run the build itself
        subprocess.check_call([
            "docker", "run",
            "--rm",
            "--name", container_name,
            "--volume=" + projdir + ":/synapse/source:ro",
            "--volume=" + debsdir + ":/debs",
            "-e", "TARGET_USERID=%i" % (os.getuid(), ),
            "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
            "dh-venv-builder:" + tag,
        ], stdout=stdout, stderr=subprocess.STDOUT)

        with self._lock:
            self.active_containers.remove(container_name)

        if stdout is not None:
            stdout.close()
        print("Completed build of %s" % (dist, ))

    def kill_containers(self):
        with self._lock:
            active = list(self.active_containers)

        for c in active:
            print("killing container %s" % (c,))
            subprocess.run([
                "docker", "kill", c,
            ], stdout=subprocess.DEVNULL)
            with self._lock:
                self.active_containers.remove(c)


def run_builds(dists, jobs=1):
    builder = Builder(redirect_stdout=(jobs > 1))

    def sig(signum, _frame):
        print("Caught SIGINT")
        builder.kill_containers()
    signal.signal(signal.SIGINT, sig)

    with ThreadPoolExecutor(max_workers=jobs) as e:
        res = e.map(builder.run_build, dists)

        # make sure we consume the iterable so that exceptions are raised.
        for r in res:
            pass


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=DESC,
    )
    parser.add_argument(
        '-j', '--jobs', type=int, default=1,
        help='specify the number of builds to run in parallel',
    )
    parser.add_argument(
        'dist', nargs='*', default=DISTS,
        help='a list of distributions to build for. Default: %(default)s',
    )
    args = parser.parse_args()
    run_builds(dists=args.dist, jobs=args.jobs)
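
A usage sketch, using the flags defined by the argparse setup above (the two
distribution names are just entries picked from ``DISTS`` for illustration)::

    # build stretch and bionic debs, two builds in parallel
    scripts-dev/build_debian_packages -j 2 debian:stretch ubuntu:bionic

The resulting .debs are written to ``../debs`` relative to the project
directory, as created in ``_inner_build()``.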

67 scripts/generate_config Executable file
@@ -0,0 +1,67 @@
#!/usr/bin/env python

import argparse
import sys

from synapse.config.homeserver import HomeServerConfig

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-dir",
        default="CONFDIR",
        help="The path where the config files are kept. Used to create filenames for "
             "things like the log config and the signing key. Default: %(default)s",
    )

    parser.add_argument(
        "--data-dir",
        default="DATADIR",
        help="The path where the data files are kept. Used to create filenames for "
             "things like the database and media store. Default: %(default)s",
    )

    parser.add_argument(
        "--server-name",
        default="SERVERNAME",
        help="The server name. Used to initialise the server_name config param, but also "
             "used in the names of some of the config files. Default: %(default)s",
    )

    parser.add_argument(
        "--report-stats",
        action="store",
        help="Whether the generated config reports anonymized usage statistics",
        choices=["yes", "no"],
    )

    parser.add_argument(
        "--generate-secrets",
        action="store_true",
        help="Enable generation of new secrets for things like the macaroon_secret_key."
             "By default, these parameters will be left unset."
    )

    parser.add_argument(
        "-o", "--output-file",
        type=argparse.FileType('w'),
        default=sys.stdout,
        help="File to write the configuration to. Default: stdout",
    )

    args = parser.parse_args()

    report_stats = args.report_stats
    if report_stats is not None:
        report_stats = report_stats == "yes"

    conf = HomeServerConfig().generate_config(
        config_dir_path=args.config_dir,
        data_dir_path=args.data_dir,
        server_name=args.server_name,
        generate_secrets=args.generate_secrets,
        report_stats=report_stats,
    )

    args.output_file.write(conf)
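
For example, a possible invocation (the argument values are illustrative; the
flags are those defined above)::

    scripts/generate_config \
        --server-name=example.com \
        --report-stats=no \
        --generate-secrets \
        -o homeserver.yaml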

16 setup.py
@@ -84,13 +84,25 @@ version = exec_file(("synapse", "__init__.py"))["__version__"]
dependencies = exec_file(("synapse", "python_dependencies.py"))
long_description = read_file(("README.rst",))

REQUIREMENTS = dependencies['REQUIREMENTS']
CONDITIONAL_REQUIREMENTS = dependencies['CONDITIONAL_REQUIREMENTS']

# Make `pip install matrix-synapse[all]` install all the optional dependencies.
ALL_OPTIONAL_REQUIREMENTS = set()

for optional_deps in CONDITIONAL_REQUIREMENTS.values():
    ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS

CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)


setup(
    name="matrix-synapse",
    version=version,
    packages=find_packages(exclude=["tests", "tests.*"]),
    description="Reference homeserver for the Matrix decentralised comms protocol",
    install_requires=dependencies['requirements'](include_conditional=True).keys(),
    dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
    install_requires=REQUIREMENTS,
    extras_require=CONDITIONAL_REQUIREMENTS,
    include_package_data=True,
    zip_safe=False,
    long_description=long_description,
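
With the extras wired up this way, all of the optional dependencies can be
pulled in at install time::

    pip install matrix-synapse[all]

This is the same ``[all]`` extra that the ``--extras="all"`` flag in
``debian/build_virtualenv`` and the ``/synapse[all]`` line in the Dockerfile
above rely on.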

@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.34.0.1"
__version__ = "0.99.0rc2"

@@ -35,6 +35,7 @@ def request_registration(
    server_location,
    shared_secret,
    admin=False,
    user_type=None,
    requests=_requests,
    _print=print,
    exit=sys.exit,
@@ -45,7 +46,7 @@ def request_registration(
    # Get the nonce
    r = requests.get(url, verify=False)

    if r.status_code is not 200:
    if r.status_code != 200:
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
@@ -65,6 +66,9 @@ def request_registration(
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    if user_type:
        mac.update(b"\x00")
        mac.update(user_type.encode('utf8'))

    mac = mac.hexdigest()

@@ -74,12 +78,13 @@ def request_registration(
        "password": password,
        "mac": mac,
        "admin": admin,
        "user_type": user_type,
    }

    _print("Sending registration request...")
    r = requests.post(url, json=data, verify=False)

    if r.status_code is not 200:
    if r.status_code != 200:
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
@@ -91,7 +96,7 @@ def request_registration(
    _print("Success!")


def register_new_user(user, password, server_location, shared_secret, admin):
def register_new_user(user, password, server_location, shared_secret, admin, user_type):
    if not user:
        try:
            default_user = getpass.getuser()
@@ -129,7 +134,8 @@ def register_new_user(user, password, server_location, shared_secret, admin):
    else:
        admin = False

    request_registration(user, password, server_location, shared_secret, bool(admin))
    request_registration(user, password, server_location, shared_secret,
                         bool(admin), user_type)


def main():
@@ -154,6 +160,12 @@ def main():
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-t",
        "--user_type",
        default=None,
        help="User type as specified in synapse.api.constants.UserTypes",
    )
    admin_group = parser.add_mutually_exclusive_group()
    admin_group.add_argument(
        "-a",
@@ -208,7 +220,8 @@ def main():
    if args.admin or args.no_admin:
        admin = args.admin

    register_new_user(args.user, args.password, args.server_url, secret, admin)
    register_new_user(args.user, args.password, args.server_url, secret,
                      admin, args.user_type)


if __name__ == "__main__":

@@ -65,7 +65,7 @@ class Auth(object):
        register_cache("cache", "token_cache", self.token_cache)

    @defer.inlineCallbacks
    def check_from_context(self, event, context, do_sig_check=True):
    def check_from_context(self, room_version, event, context, do_sig_check=True):
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        auth_events_ids = yield self.compute_auth_events(
            event, prev_state_ids, for_verification=True,
@@ -74,12 +74,16 @@ class Auth(object):
        auth_events = {
            (e.type, e.state_key): e for e in itervalues(auth_events)
        }
        self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)
        self.check(
            room_version, event,
            auth_events=auth_events, do_sig_check=do_sig_check,
        )

    def check(self, event, auth_events, do_sig_check=True):
    def check(self, room_version, event, auth_events, do_sig_check=True):
        """ Checks if this event is correctly authed.

        Args:
            room_version (str): version of the room
            event: the event being checked.
            auth_events (dict: event-key -> event): the existing room state.

@@ -88,7 +92,9 @@ class Auth(object):
            True if the auth checks pass.
        """
        with Measure(self.clock, "auth.check"):
            event_auth.check(event, auth_events, do_sig_check=do_sig_check)
            event_auth.check(
                room_version, event, auth_events, do_sig_check=do_sig_check
            )

    @defer.inlineCallbacks
    def check_joined_room(self, room_id, user_id, current_state=None):
@@ -544,17 +550,6 @@ class Auth(object):
        """
        return self.store.is_server_admin(user)

    @defer.inlineCallbacks
    def add_auth_events(self, builder, context):
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        auth_ids = yield self.compute_auth_events(builder, prev_state_ids)

        auth_events_entries = yield self.store.add_event_hashes(
            auth_ids
        )

        builder.auth_events = auth_events_entries

    @defer.inlineCallbacks
    def compute_auth_events(self, event, current_state_ids, for_verification=False):
        if event.type == EventTypes.Create:
@@ -571,7 +566,7 @@ class Auth(object):
        key = (EventTypes.JoinRules, "", )
        join_rule_event_id = current_state_ids.get(key)

        key = (EventTypes.Member, event.user_id, )
        key = (EventTypes.Member, event.sender, )
        member_event_id = current_state_ids.get(key)

        key = (EventTypes.Create, "", )
@@ -621,7 +616,7 @@ class Auth(object):

        defer.returnValue(auth_ids)

    def check_redaction(self, event, auth_events):
    def check_redaction(self, room_version, event, auth_events):
        """Check whether the event sender is allowed to redact the target event.

        Returns:
@@ -634,7 +629,7 @@ class Auth(object):
            AuthError if the event sender is definitely not allowed to redact
            the target event.
        """
        return event_auth.check_redaction(event, auth_events)
        return event_auth.check_redaction(room_version, event, auth_events)

    @defer.inlineCallbacks
    def check_can_change_room_list(self, room_id, user):
@@ -791,9 +786,10 @@ class Auth(object):
            threepid should never be set at the same time.
        """

        # Never fail an auth check for the server notices users
        # Never fail an auth check for the server notices users or support user
        # This can be a problem where event creation is prohibited due to blocking
        if user_id == self.hs.config.server_notices_mxid:
        is_support = yield self.store.is_support_user(user_id)
        if user_id == self.hs.config.server_notices_mxid or is_support:
            return

        if self.hs.config.hs_disabled:
@@ -818,7 +814,9 @@ class Auth(object):
        elif threepid:
            # If the user does not exist yet, but is signing up with a
            # reserved threepid then pass auth check
            if is_threepid_reserved(self.hs.config, threepid):
            if is_threepid_reserved(
                self.hs.config.mau_limits_reserved_threepids, threepid
            ):
                return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()

@@ -68,10 +68,12 @@ class EventTypes(object):
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"
    Encryption = "m.room.encryption"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    RoomEncryption = "m.room.encryption"
    GuestAccess = "m.room.guest_access"

    # These are used for validation
@@ -102,10 +104,16 @@ class ThirdPartyEntityKind(object):

class RoomVersions(object):
    V1 = "1"
    VDH_TEST = "vdh-test-version"
    V2 = "2"
    V3 = "3"
    STATE_V2_TEST = "state-v2-test"


class RoomDisposition(object):
    STABLE = "stable"
    UNSTABLE = "unstable"


# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1

@@ -113,9 +121,34 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1
# until we have a working v2.
KNOWN_ROOM_VERSIONS = {
    RoomVersions.V1,
    RoomVersions.VDH_TEST,
    RoomVersions.V2,
    RoomVersions.V3,
    RoomVersions.STATE_V2_TEST,
    RoomVersions.V3,
}


class EventFormatVersions(object):
    """This is an internal enum for tracking the version of the event format,
    independently from the room version.
    """
    V1 = 1
    V2 = 2


KNOWN_EVENT_FORMAT_VERSIONS = {
    EventFormatVersions.V1,
    EventFormatVersions.V2,
}


ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"


class UserTypes(object):
    """Allows for user type specific behaviour. With the benefit of hindsight
    'admin' and 'guest' users should also be UserTypes. Normal users are type None
    """
    SUPPORT = "support"
    ALL_USER_TYPES = (SUPPORT,)

@@ -348,6 +348,24 @@ class IncompatibleRoomVersionError(SynapseError):
        )


class RequestSendFailed(RuntimeError):
    """Sending a HTTP request over federation failed due to not being able to
    talk to the remote server for some reason.

    This exception is used to differentiate "expected" errors that arise due to
    networking (e.g. DNS failures, connection timeouts etc), versus unexpected
    errors (like programming errors).
    """
    def __init__(self, inner_exception, can_retry):
        super(RequestSendFailed, self).__init__(
            "Failed to send request: %s: %s" % (
                type(inner_exception).__name__, inner_exception,
            )
        )
        self.inner_exception = inner_exception
        self.can_retry = can_retry


def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
    """ Utility method for constructing an error response for client-server
    interactions.

@@ -12,6 +12,8 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from six import text_type
|
||||
|
||||
import jsonschema
|
||||
from canonicaljson import json
|
||||
from jsonschema import FormatChecker
|
||||
@@ -353,7 +355,7 @@ class Filter(object):
|
||||
sender = event.user_id
|
||||
room_id = None
|
||||
ev_type = "m.presence"
|
||||
is_url = False
|
||||
contains_url = False
|
||||
else:
|
||||
sender = event.get("sender", None)
|
||||
if not sender:
|
||||
@@ -368,13 +370,16 @@ class Filter(object):
|
||||
|
||||
room_id = event.get("room_id", None)
|
||||
ev_type = event.get("type", None)
|
||||
is_url = "url" in event.get("content", {})
|
||||
|
||||
content = event.get("content", {})
|
||||
# check if there is a string url field in the content for filtering purposes
|
||||
contains_url = isinstance(content.get("url"), text_type)
|
||||
|
||||
return self.check_fields(
|
||||
room_id,
|
||||
sender,
|
||||
ev_type,
|
||||
is_url,
|
||||
contains_url,
|
||||
)
|
||||
|
||||
def check_fields(self, room_id, sender, event_type, contains_url):
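
The hunk above renames the filter field from `is_url` to `contains_url` and tightens the check: only a string-typed `url` key now counts, so an event carrying a non-string `url` value no longer matches URL filters. A self-contained sketch of the difference, assuming `event` is a plain dict:

    from six import text_type

    event = {"content": {"url": 12345}}  # malformed: url is not a string

    # old check: any "url" key matched, regardless of type
    is_url = "url" in event.get("content", {})                        # True

    # new check: only string values match
    contains_url = isinstance(
        event.get("content", {}).get("url"), text_type
    )                                                                 # False
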
@@ -439,6 +444,20 @@ class Filter(object):
    def include_redundant_members(self):
        return self.filter_json.get("include_redundant_members", False)

    def with_room_ids(self, room_ids):
        """Returns a new filter with the given room IDs appended.

        Args:
            room_ids (iterable[unicode]): The room_ids to add

        Returns:
            filter: A new filter including the given rooms and the old
                filter's rooms.
        """
        newFilter = Filter(self.filter_json)
        newFilter.rooms += room_ids
        return newFilter
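
`with_room_ids` is non-destructive: it builds a fresh `Filter` from the same JSON and widens only the copy's room list. A usage sketch, under the assumption that the `Filter` class above is importable as `synapse.api.filtering.Filter` and that the filter JSON shown is a plausible invented example:

    base = Filter({"room": {"rooms": ["!a:example.com"]}})
    wider = base.with_room_ids(["!b:example.com"])
    # `base` is unchanged; `wider` matches both rooms.
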


def _matches_wildcard(actual_value, filter_value):
    if filter_value.endswith("*"):

@@ -24,7 +24,9 @@ from synapse.config import ConfigError

CLIENT_PREFIX = "/_matrix/client/api/v1"
CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
FEDERATION_PREFIX = "/_matrix/federation/v1"
FEDERATION_PREFIX = "/_matrix/federation"
FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"

@@ -12,22 +12,38 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import sys

from synapse import python_dependencies  # noqa: E402

sys.dont_write_bytecode = True

logger = logging.getLogger(__name__)

try:
    python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
    message = "\n".join([
        "Missing Requirement: %s" % (str(e),),
        "To install run:",
        "    pip install --upgrade --force \"%s\"" % (e.dependency,),
        "",
    ])
    sys.stderr.writelines(message)
except python_dependencies.DependencyException as e:
    sys.stderr.writelines(e.message)
    sys.exit(1)


def check_bind_error(e, address, bind_addresses):
    """
    This method checks an exception occurred while binding on 0.0.0.0.
    If :: is specified in the bind addresses a warning is shown.
    The exception is still raised otherwise.

    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
    When binding on 0.0.0.0 after :: this can safely be ignored.

    Args:
        e (Exception): Exception that was caught.
        address (str): Address on which binding was attempted.
        bind_addresses (list): Addresses on which the service listens.
    """
    if address == '0.0.0.0' and '::' in bind_addresses:
        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
    else:
        raise e
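
A self-contained illustration of the dual-stack collision the docstring describes, using the plain stdlib rather than Synapse code (the port number is an arbitrary example):

    import socket

    # On Linux/macOS, binding "::" first claims both IPv6 and IPv4,
    # so a subsequent bind on "0.0.0.0" for the same port fails.
    s6 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    s6.bind(("::", 8448))

    s4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s4.bind(("0.0.0.0", 8448))
    except OSError:
        # This is the "expected" failure that check_bind_error() swallows
        # when "::" is also in the configured bind addresses.
        pass
    finally:
        s4.close()
        s6.close()
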

@@ -22,6 +22,7 @@ from daemonize import Daemonize

from twisted.internet import error, reactor

from synapse.app import check_bind_error
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit

@@ -143,6 +144,9 @@ def listen_metrics(bind_addresses, port):
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    """
    Create a TCP socket for a port and several addresses

    Returns:
        list (empty)
    """
    for address in bind_addresses:
        try:
@@ -155,42 +159,33 @@ def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    logger.info("Synapse now listening on TCP port %d", port)
    return []


def listen_ssl(
    bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
    """
    Create an SSL socket for a port and several addresses
    Create an TLS-over-TCP socket for a port and several addresses

    Returns:
        list of twisted.internet.tcp.Port listening for TLS connections
    """
    r = []
    for address in bind_addresses:
        try:
            reactor.listenSSL(
                port,
                factory,
                context_factory,
                backlog,
                address
            r.append(
                reactor.listenSSL(
                    port,
                    factory,
                    context_factory,
                    backlog,
                    address
                )
            )
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)


def check_bind_error(e, address, bind_addresses):
    """
    This method checks an exception occurred while binding on 0.0.0.0.
    If :: is specified in the bind addresses a warning is shown.
    The exception is still raised otherwise.

    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
    When binding on 0.0.0.0 after :: this can safely be ignored.

    Args:
        e (Exception): Exception that was caught.
        address (str): Address on which binding was attempted.
        bind_addresses (list): Addresses on which the service listens.
    """
    if address == '0.0.0.0' and '::' in bind_addresses:
        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
    else:
        raise e
    logger.info("Synapse now listening on port %d (TLS)", port)
    return r
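
The behavioural change in this hunk is that `listen_ssl` now collects and returns the `twisted.internet.tcp.Port` objects it opens; the homeserver stores them (in `_listening_services`, below) so the TLS context factory can be swapped on a live listener. A self-contained Twisted sketch of capturing listening ports this way (assumes Twisted is installed; the echo factory is illustrative):

    from twisted.internet import protocol, reactor

    class Echo(protocol.Protocol):
        def dataReceived(self, data):
            self.transport.write(data)

    factory = protocol.Factory.forProtocol(Echo)

    # Keep the Port objects so listeners can be inspected or replaced later,
    # mirroring what listen_ssl now does with its return value.
    ports = [reactor.listenTCP(0, factory, backlog=50, interface=addr)
             for addr in ("127.0.0.1",)]
    print([p.getHost() for p in ports])
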

@@ -164,23 +164,23 @@ def start(config_options):

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
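
The same refactor is applied to every worker below: instead of building TLS context factories eagerly at construction time, the certificate is read from disk inside the deferred `start()` callback, after the reactor is running, so that a freshly provisioned (e.g. ACME) certificate can exist by then. Reduced to a condensed skeleton of the new flow (illustrative, not the verbatim result of any single hunk):

    def start(config_options):
        # ... parse config, create database engine, construct the worker ...
        ss.setup()

        def start():
            # Certificate may have been (re)provisioned since startup began.
            ss.config.read_certificate_from_disk()
            ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
            ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
            ss.start_listening(config.worker_listeners)
            ss.get_datastore().start_profiling()

        reactor.callWhenRunning(start)
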

@@ -185,23 +185,23 @@ def start(config_options):

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = EventCreatorServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

@@ -151,23 +151,23 @@ def start(config_options):

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

@@ -183,24 +183,24 @@ def start(config_options):
    # Force the pushers to start since they will be disabled in the main config
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ps = FederationSenderServer(
    ss = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)
    ss.setup()

    def start():
        ps.get_datastore().start_profiling()
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)
    _base.start_worker_reactor("synapse-federation-sender", config)

@@ -241,23 +241,23 @@ def start(config_options):

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

@@ -13,10 +13,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import os
import signal
import sys
import traceback

from six import iteritems

@@ -25,6 +28,7 @@ from prometheus_client import Gauge

from twisted.application import service
from twisted.internet import defer, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
@@ -60,6 +64,7 @@ from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.well_known import WellKnownResource
from synapse.server import HomeServer
from synapse.storage import DataStore, are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
@@ -81,6 +86,7 @@ def gz_wrap(r):

class SynapseHomeServer(HomeServer):
    DATASTORE_CLASS = DataStore
    _listening_services = []

    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
@@ -89,7 +95,9 @@ class SynapseHomeServer(HomeServer):
        site_tag = listener_config.get("tag", port)

        if tls and config.no_tls:
            return
            raise ConfigError(
                "Listener on port %i has TLS enabled, but no_tls is set" % (port,),
            )

        resources = {}
        for res in listener_config["resources"]:
@@ -118,7 +126,7 @@ class SynapseHomeServer(HomeServer):
        root_resource = create_resource_tree(resources, root_resource)

        if tls:
            listen_ssl(
            return listen_ssl(
                bind_addresses,
                port,
                SynapseSite(
@@ -132,7 +140,7 @@ class SynapseHomeServer(HomeServer):
            )

        else:
            listen_tcp(
            return listen_tcp(
                bind_addresses,
                port,
                SynapseSite(
@@ -143,7 +151,6 @@ class SynapseHomeServer(HomeServer):
                    self.version_string,
                )
            )
            logger.info("Synapse now listening on port %d", port)

    def _configure_named_resource(self, name, compress=False):
        """Build a resource map for a named resource
@@ -168,8 +175,13 @@ class SynapseHomeServer(HomeServer):
            "/_matrix/client/unstable": client_resource,
            "/_matrix/client/v2_alpha": client_resource,
            "/_matrix/client/versions": client_resource,
            "/.well-known/matrix/client": WellKnownResource(self),
        })

        if self.get_config().saml2_enabled:
            from synapse.rest.saml2 import SAML2Resource
            resources["/_matrix/saml2"] = SAML2Resource(self)

        if name == "consent":
            from synapse.rest.consent.consent_resource import ConsentResource
            consent_resource = ConsentResource(self)
@@ -234,7 +246,9 @@ class SynapseHomeServer(HomeServer):

        for listener in config.listeners:
            if listener["type"] == "http":
                self._listener_http(config, listener)
                self._listening_services.extend(
                    self._listener_http(config, listener)
                )
            elif listener["type"] == "manhole":
                listen_tcp(
                    listener["bind_addresses"],
@@ -314,24 +328,28 @@ def setup(config_options):
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    synapse.config.logger.setup_logging(config, use_worker_options=False)
    sighup_callbacks = []
    synapse.config.logger.setup_logging(
        config,
        use_worker_options=False,
        register_sighup=sighup_callbacks.append
    )

    # check any extra requirements we have now we have a config
    check_requirements(config)
    def handle_sighup(*args, **kwargs):
        for i in sighup_callbacks:
            i(*args, **kwargs)

    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, handle_sighup)
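
This replaces the previous single-owner SIGHUP handler with a simple broadcast list: any subsystem appends a callback, and one OS-level handler fans the signal out to all of them. A self-contained sketch of the same mechanism (pure stdlib; the two lambdas are invented stand-ins for real subsystems):

    import signal

    sighup_callbacks = []

    def handle_sighup(*args, **kwargs):
        # Fan a single OS signal out to every registered subsystem.
        for cb in sighup_callbacks:
            cb(*args, **kwargs)

    if hasattr(signal, "SIGHUP"):  # SIGHUP does not exist on Windows
        signal.signal(signal.SIGHUP, handle_sighup)

    # Subsystems register interest instead of stealing the signal:
    sighup_callbacks.append(lambda *a, **kw: print("reloading logging config"))
    sighup_callbacks.append(lambda *a, **kw: print("reloading TLS certificate"))
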

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
@@ -358,12 +376,78 @@ def setup(config_options):
    logger.info("Database prepared in %s.", config.database_config['name'])

    hs.setup()
    hs.start_listening()

    def refresh_certificate(*args):
        """
        Refresh the TLS certificates that Synapse is using by re-reading them
        from disk and updating the TLS context factories to use them.
        """
        logging.info("Reloading certificate from disk...")
        hs.config.read_certificate_from_disk()
        hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
        hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        logging.info("Certificate reloaded.")

        logging.info("Updating context factories...")
        for i in hs._listening_services:
            if isinstance(i.factory, TLSMemoryBIOFactory):
                i.factory = TLSMemoryBIOFactory(
                    hs.tls_server_context_factory,
                    False,
                    i.factory.wrappedFactory
                )
        logging.info("Context factories updated.")

    sighup_callbacks.append(refresh_certificate)
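
Certificate hot-reload works because each listening Port keeps a reference to its protocol factory: wrapping the existing `wrappedFactory` in a fresh `TLSMemoryBIOFactory` means new connections pick up the new certificate while established ones are untouched. Reduced to the essential move (illustrative helper, not verbatim Synapse code):

    from twisted.protocols.tls import TLSMemoryBIOFactory

    def swap_tls_context(port, new_context_factory):
        """Swap the TLS layer on a live Twisted port (sketch)."""
        old = port.factory
        if isinstance(old, TLSMemoryBIOFactory):
            # Re-wrap the inner (plaintext) factory with the new TLS context;
            # isClient=False because this is a server-side listener.
            port.factory = TLSMemoryBIOFactory(
                new_context_factory, False, old.wrappedFactory
            )
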

    @defer.inlineCallbacks
    def start():
        hs.get_pusherpool().start()
        hs.get_datastore().start_profiling()
        hs.get_datastore().start_doing_background_updates()
        try:
            # Check if the certificate is still valid.
            cert_days_remaining = hs.config.is_disk_cert_valid()

            if hs.config.acme_enabled:
                # If ACME is enabled, we might need to provision a certificate
                # before starting.
                acme = hs.get_acme_handler()

                # Start up the webservices which we will respond to ACME
                # challenges with.
                yield acme.start_listening()

                # We want to reprovision if cert_days_remaining is None (meaning no
                # certificate exists), or the days remaining number it returns
                # is less than our re-registration threshold.
                if (cert_days_remaining is None) or (
                    not cert_days_remaining > hs.config.acme_reprovision_threshold
                ):
                    yield acme.provision_certificate()

            # Read the certificate from disk and build the context factories for
            # TLS.
            hs.config.read_certificate_from_disk()
            hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
            hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
                config
            )

            # It is now safe to start your Synapse.
            hs.start_listening()
            hs.get_pusherpool().start()
            hs.get_datastore().start_profiling()
            hs.get_datastore().start_doing_background_updates()
        except Exception as e:
            # If a DeferredList failed (like in listening on the ACME listener),
            # we need to print the subfailure explicitly.
            if isinstance(e, defer.FirstError):
                e.subFailure.printTraceback(sys.stderr)
                sys.exit(1)

            # Something else went wrong when starting. Print it and bail out.
            traceback.print_exc(file=sys.stderr)
            sys.exit(1)

    reactor.callWhenRunning(start)
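
The reprovisioning rule in this hunk is: fetch a new certificate when none exists, or when no more days remain than `acme_reprovision_threshold` (which defaults to 30 elsewhere in this diff). The negated comparison is easier to read when flipped around; a self-contained restatement:

    def needs_reprovision(cert_days_remaining, threshold=30):
        # No certificate on disk at all -> must provision.
        if cert_days_remaining is None:
            return True
        # "not days > threshold" is equivalent to "days <= threshold".
        return cert_days_remaining <= threshold

    assert needs_reprovision(None) is True
    assert needs_reprovision(29) is True
    assert needs_reprovision(30) is True   # boundary: 30 days left still renews
    assert needs_reprovision(31) is False
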

@@ -531,7 +615,7 @@ def run(hs):
        )

    start_generate_monthly_active_users()
    if hs.config.limit_usage_by_mau:
    if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
        clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings


@@ -151,23 +151,23 @@ def start(config_options):

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

@@ -211,24 +211,24 @@ def start(config_options):
    # Force the pushers to start since they will be disabled in the main config
    config.update_user_directory = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ps = UserDirectoryServer(
    ss = UserDirectoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)
    ss.setup()

    def start():
        ps.get_datastore().start_profiling()
        ss.config.read_certificate_from_disk()
        ss.tls_server_context_factory = context_factory.ServerContextFactory(config)
        ss.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(
            config
        )
        ss.start_listening(config.worker_listeners)
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)


@@ -16,7 +16,7 @@ from synapse.config._base import ConfigError

if __name__ == "__main__":
    import sys
    from homeserver import HomeServerConfig
    from synapse.config.homeserver import HomeServerConfig

    action = sys.argv[1]


@@ -134,10 +134,6 @@ class Config(object):
        with open(file_path) as file_stream:
            return file_stream.read()

    @staticmethod
    def default_path(name):
        return os.path.abspath(os.path.join(os.path.curdir, name))

    @staticmethod
    def read_config_file(file_path):
        with open(file_path) as file_stream:
@@ -151,8 +147,39 @@ class Config(object):
        return results

    def generate_config(
        self, config_dir_path, server_name, is_generating_file, report_stats=None
        self,
        config_dir_path,
        data_dir_path,
        server_name,
        generate_secrets=False,
        report_stats=None,
    ):
        """Build a default configuration file

        This is used both when the user explicitly asks us to generate a config file
        (eg with --generate_config), and before loading the config at runtime (to give
        a base which the config files override)

        Args:
            config_dir_path (str): The path where the config files are kept. Used to
                create filenames for things like the log config and the signing key.

            data_dir_path (str): The path where the data files are kept. Used to create
                filenames for things like the database and media store.

            server_name (str): The server name. Used to initialise the server_name
                config param, but also used in the names of some of the config files.

            generate_secrets (bool): True if we should generate new secrets for things
                like the macaroon_secret_key. If False, these parameters will be left
                unset.

            report_stats (bool|None): Initial setting for the report_stats setting.
                If None, report_stats will be left unset.

        Returns:
            str: the yaml config file
        """
        default_config = "# vim:ft=yaml\n"

        default_config += "\n\n".join(
@@ -160,15 +187,14 @@ class Config(object):
            for conf in self.invoke_all(
                "default_config",
                config_dir_path=config_dir_path,
                data_dir_path=data_dir_path,
                server_name=server_name,
                is_generating_file=is_generating_file,
                generate_secrets=generate_secrets,
                report_stats=report_stats,
            )
        )

        config = yaml.load(default_config)

        return default_config, config
        return default_config
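
`generate_config` now returns only the YAML string and takes an explicit `generate_secrets` flag, so callers that merely need a base config at load time no longer mint fresh secrets as a side effect. A sketch of the two call sites' intent, condensed from the hunks that follow (treat it as a summary, not verbatim code):

    # Explicit generation (--generate-config): mint real secrets.
    config_str = obj.generate_config(
        config_dir_path=config_dir_path,
        data_dir_path=os.getcwd(),
        server_name=server_name,
        generate_secrets=True,
    )
    config = yaml.load(config_str)   # the caller parses when it needs a dict

    # Runtime base config: leave secrets unset (commented out in the YAML).
    base_str = obj.generate_config(
        config_dir_path=config_dir_path,
        data_dir_path=os.getcwd(),
        server_name=server_name,
        generate_secrets=False,
    )
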

    @classmethod
    def load_config(cls, description, argv):
@@ -274,12 +300,14 @@ class Config(object):
            if not cls.path_exists(config_dir_path):
                os.makedirs(config_dir_path)
            with open(config_path, "w") as config_file:
                config_str, config = obj.generate_config(
                config_str = obj.generate_config(
                    config_dir_path=config_dir_path,
                    data_dir_path=os.getcwd(),
                    server_name=server_name,
                    report_stats=(config_args.report_stats == "yes"),
                    is_generating_file=True,
                    generate_secrets=True,
                )
                config = yaml.load(config_str)
                obj.invoke_all("generate_files", config)
                config_file.write(config_str)
                print(
@@ -339,7 +367,7 @@ class Config(object):
        if not keys_directory:
            keys_directory = os.path.dirname(config_files[-1])

        config_dir_path = os.path.abspath(keys_directory)
        self.config_dir_path = os.path.abspath(keys_directory)

        specified_config = {}
        for config_file in config_files:
@@ -350,11 +378,13 @@ class Config(object):
            raise ConfigError(MISSING_SERVER_NAME)

        server_name = specified_config["server_name"]
        _, config = self.generate_config(
            config_dir_path=config_dir_path,
        config_string = self.generate_config(
            config_dir_path=self.config_dir_path,
            data_dir_path=os.getcwd(),
            server_name=server_name,
            is_generating_file=False,
            generate_secrets=False,
        )
        config = yaml.load(config_string)
        config.pop("log_config")
        config.update(specified_config)


@@ -24,6 +24,7 @@ class ApiConfig(Config):
            EventTypes.JoinRules,
            EventTypes.CanonicalAlias,
            EventTypes.RoomAvatar,
            EventTypes.RoomEncryption,
            EventTypes.Name,
        ])

@@ -36,5 +37,6 @@ class ApiConfig(Config):
          - "{JoinRules}"
          - "{CanonicalAlias}"
          - "{RoomAvatar}"
          - "{RoomEncryption}"
          - "{Name}"
        """.format(**vars(EventTypes))

@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from os import path

from synapse.config import ConfigError

from ._base import Config

DEFAULT_CONFIG = """\
@@ -85,7 +89,15 @@ class ConsentConfig(Config):
        if consent_config is None:
            return
        self.user_consent_version = str(consent_config["version"])
        self.user_consent_template_dir = consent_config["template_dir"]
        self.user_consent_template_dir = self.abspath(
            consent_config["template_dir"]
        )
        if not path.isdir(self.user_consent_template_dir):
            raise ConfigError(
                "Could not find template directory '%s'" % (
                    self.user_consent_template_dir,
                ),
            )
        self.user_consent_server_notice_content = consent_config.get(
            "server_notice_content",
        )

@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from ._base import Config

@@ -45,8 +46,8 @@ class DatabaseConfig(Config):

        self.set_databasepath(config.get("database_path"))

    def default_config(self, **kwargs):
        database_path = self.abspath("homeserver.db")
    def default_config(self, data_dir_path, **kwargs):
        database_path = os.path.join(data_dir_path, "homeserver.db")
        return """\
        # Database configuration
        database:

@@ -32,7 +32,7 @@ from .ratelimiting import RatelimitConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
from .saml2_config import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
@@ -53,10 +53,3 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       ServerNoticesConfig, RoomDirectoryConfig,
                       ):
    pass


if __name__ == '__main__':
    import sys
    sys.stdout.write(
        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0]
    )

@@ -66,26 +66,32 @@ class KeyConfig(Config):
        # falsification of values
        self.form_secret = config.get("form_secret", None)

    def default_config(self, config_dir_path, server_name, is_generating_file=False,
    def default_config(self, config_dir_path, server_name, generate_secrets=False,
                       **kwargs):
        base_key_name = os.path.join(config_dir_path, server_name)

        if is_generating_file:
            macaroon_secret_key = '"%s"' % random_string_with_symbols(50)
            form_secret = '"%s"' % random_string_with_symbols(50)
        if generate_secrets:
            macaroon_secret_key = 'macaroon_secret_key: "%s"' % (
                random_string_with_symbols(50),
            )
            form_secret = 'form_secret: "%s"' % random_string_with_symbols(50)
        else:
            macaroon_secret_key = 'null'
            form_secret = 'null'
            macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
            form_secret = "# form_secret: <PRIVATE STRING>"

        return """\
        macaroon_secret_key: %(macaroon_secret_key)s
        # a secret which is used to sign access tokens. If none is specified,
        # the registration_shared_secret is used, if one is given; otherwise,
        # a secret key is derived from the signing key.
        %(macaroon_secret_key)s

        # Used to enable access token expiration.
        expire_access_token: False

        # a secret which is used to calculate HMACs for form values, to stop
        # falsification of values
        form_secret: %(form_secret)s
        # falsification of values. Must be specified for the User Consent
        # forms to work.
        %(form_secret)s

        ## Signing Keys ##


@@ -80,9 +80,7 @@ class LoggingConfig(Config):
        self.log_file = self.abspath(config.get("log_file"))

    def default_config(self, config_dir_path, server_name, **kwargs):
        log_config = self.abspath(
            os.path.join(config_dir_path, server_name + ".log.config")
        )
        log_config = os.path.join(config_dir_path, server_name + ".log.config")
        return """
        # A yaml python logging config file
        log_config: "%(log_config)s"
@@ -129,7 +127,7 @@ class LoggingConfig(Config):
        )


def setup_logging(config, use_worker_options=False):
def setup_logging(config, use_worker_options=False, register_sighup=None):
    """ Set up python logging

    Args:
@@ -138,7 +136,16 @@ def setup_logging(config, use_worker_options=False):

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.

        register_sighup (func | None): Function to call to register a
            sighup handler.
    """
    if not register_sighup:
        if getattr(signal, "SIGHUP"):
            register_sighup = lambda x: signal.signal(signal.SIGHUP, x)
        else:
            register_sighup = lambda x: None

    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
@@ -200,13 +207,7 @@ def setup_logging(config, use_worker_options=False):

    load_log_config()

    # TODO(paul): obviously this is a terrible mechanism for
    # stealing SIGHUP, because it means no other part of synapse
    # can use it instead. If we want to catch SIGHUP anywhere
    # else as well, I'd suggest we find a nicer way to broadcast
    # it around.
    if getattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighup)
    register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for

@@ -24,10 +24,16 @@ class MetricsConfig(Config):
        self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")

    def default_config(self, report_stats=None, **kwargs):
        suffix = "" if report_stats is None else "report_stats: %(report_stats)s\n"
        return ("""\
        res = """\
        ## Metrics ###

        # Enable collection and rendering of performance metrics
        enable_metrics: False
        """ + suffix) % locals()
        """

        if report_stats is None:
            res += "# report_stats: true|false\n"
        else:
            res += "report_stats: %s\n" % ('true' if report_stats else 'false')

        return res

@@ -37,6 +37,7 @@ class RegistrationConfig(Config):

        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
        self.default_identity_server = config.get("default_identity_server")
        self.allow_guest_access = config.get("allow_guest_access", False)

        self.invite_3pid_guest = (
@@ -49,8 +50,17 @@ class RegistrationConfig(Config):
                raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
        self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)

    def default_config(self, **kwargs):
        registration_shared_secret = random_string_with_symbols(50)
        self.disable_msisdn_registration = (
            config.get("disable_msisdn_registration", False)
        )

    def default_config(self, generate_secrets=False, **kwargs):
        if generate_secrets:
            registration_shared_secret = 'registration_shared_secret: "%s"' % (
                random_string_with_symbols(50),
            )
        else:
            registration_shared_secret = '# registration_shared_secret: <PRIVATE STRING>'

        return """\
        ## Registration ##
@@ -64,20 +74,25 @@ class RegistrationConfig(Config):
        #  - email
        #  - msisdn

        # Explicitly disable asking for MSISDNs from the registration
        # flow (overrides registrations_require_3pid if MSISDNs are set as required)
        #
        # disable_msisdn_registration = True

        # Mandate that users are only allowed to associate certain formats of
        # 3PIDs with accounts on this server.
        #
        # allowed_local_3pids:
        #     - medium: email
        #       pattern: ".*@matrix\\.org"
        #       pattern: '.*@matrix\\.org'
        #     - medium: email
        #       pattern: ".*@vector\\.im"
        #       pattern: '.*@vector\\.im'
        #     - medium: msisdn
        #       pattern: "\\+44"
        #       pattern: '\\+44'

        # If set, allows registration by anyone who also has the shared
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"
        %(registration_shared_secret)s

        # Set the number of bcrypt rounds used to generate password hash.
        # Larger numbers increase the work factor needed to generate the hash.
@@ -91,6 +106,14 @@ class RegistrationConfig(Config):
        # accessible to anonymous users.
        allow_guest_access: False

        # The identity server which we suggest that clients should use when users log
        # in on this server.
        #
        # (By default, no suggestion is made, so it is left up to the client.
        # This setting is ignored unless public_baseurl is also set.)
        #
        # default_identity_server: https://matrix.org

        # The list of identity servers trusted to verify third party
        # identifiers by this server.
        #

@@ -12,7 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from collections import namedtuple

from synapse.util.module_loader import load_module
@@ -175,9 +175,9 @@ class ContentRepositoryConfig(Config):
            "url_preview_url_blacklist", ()
        )

    def default_config(self, **kwargs):
        media_store = self.default_path("media_store")
        uploads_path = self.default_path("uploads")
    def default_config(self, data_dir_path, **kwargs):
        media_store = os.path.join(data_dir_path, "media_store")
        uploads_path = os.path.join(data_dir_path, "uploads")
        return r"""
        # Directory where uploaded images and attachments are stored.
        media_store_path: "%(media_store)s"

@@ -1,55 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class SAML2Config(Config):
    """SAML2 Configuration
    Synapse uses pysaml2 libraries for providing SAML2 support

    config_path:      Path to the sp_conf.py configuration file
    idp_redirect_url: Identity provider URL which will redirect
                      the user back to /login/saml2 with proper info.

    sp_conf.py file is something like:
    https://github.com/rohe/pysaml2/blob/master/example/sp-repoze/sp_conf.py.example

    More information: https://pythonhosted.org/pysaml2/howto/config.html
    """

    def read_config(self, config):
        saml2_config = config.get("saml2_config", None)
        if saml2_config:
            self.saml2_enabled = saml2_config.get("enabled", True)
            self.saml2_config_path = saml2_config["config_path"]
            self.saml2_idp_redirect_url = saml2_config["idp_redirect_url"]
        else:
            self.saml2_enabled = False
            self.saml2_config_path = None
            self.saml2_idp_redirect_url = None

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable SAML2 for registration and login. Uses pysaml2
        # config_path:      Path to the sp_conf.py configuration file
        # idp_redirect_url: Identity provider URL which will redirect
        #                   the user back to /login/saml2 with proper info.
        # See pysaml2 docs for format of config.
        #saml2_config:
        #   enabled: true
        #   config_path: "%s/sp_conf.py"
        #   idp_redirect_url: "http://%s/idp"
        """ % (config_dir_path, server_name)

synapse/config/saml2_config.py (new file, 110 lines)
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError


class SAML2Config(Config):
    def read_config(self, config):
        self.saml2_enabled = False

        saml2_config = config.get("saml2_config")

        if not saml2_config or not saml2_config.get("enabled", True):
            return

        self.saml2_enabled = True

        import saml2.config
        self.saml2_sp_config = saml2.config.SPConfig()
        self.saml2_sp_config.load(self._default_saml_config_dict())
        self.saml2_sp_config.load(saml2_config.get("sp_config", {}))

        config_path = saml2_config.get("config_path", None)
        if config_path is not None:
            self.saml2_sp_config.load_file(config_path)
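
Note the layering in `read_config`: pysaml2's `SPConfig` is loaded three times, and (per the generated config comment below) later loads override overlapping settings, so precedence runs defaults < inline `sp_config` < external `config_path` file. A sketch of that ordering; the dicts are invented examples and the exact merge semantics are pysaml2's, not this diff's:

    from saml2.config import SPConfig

    defaults = {"entityid": "https://hs.example.com/_matrix/saml2/metadata.xml"}
    inline   = {"entityid": "https://override.example.com/metadata.xml"}

    sp_config = SPConfig()
    sp_config.load(defaults)   # 1. Synapse-generated defaults
    sp_config.load(inline)     # 2. sp_config block from homeserver.yaml
    # 3. an optional external pysaml2 file wins over both:
    # sp_config.load_file("/etc/synapse/sp_conf.py")
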

    def _default_saml_config_dict(self):
        import saml2

        public_baseurl = self.public_baseurl
        if public_baseurl is None:
            raise ConfigError(
                "saml2_config requires a public_baseurl to be set"
            )

        metadata_url = public_baseurl + "_matrix/saml2/metadata.xml"
        response_url = public_baseurl + "_matrix/saml2/authn_response"
        return {
            "entityid": metadata_url,

            "service": {
                "sp": {
                    "endpoints": {
                        "assertion_consumer_service": [
                            (response_url, saml2.BINDING_HTTP_POST),
                        ],
                    },
                    "required_attributes": ["uid"],
                    "optional_attributes": ["mail", "surname", "givenname"],
                },
            }
        }

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable SAML2 for registration and login. Uses pysaml2.
        #
        # saml2_config:
        #
        #   # The following is the configuration for the pysaml2 Service Provider.
        #   # See pysaml2 docs for format of config.
        #   #
        #   # Default values will be used for the 'entityid' and 'service' settings,
        #   # so it is not normally necessary to specify them unless you need to
        #   # override them.
        #
        #   sp_config:
        #     # point this to the IdP's metadata. You can use either a local file or
        #     # (preferably) a URL.
        #     metadata:
        #       # local: ["saml2/idp.xml"]
        #       remote:
        #         - url: https://our_idp/metadata.xml
        #
        #     # The following is just used to generate our metadata xml, and you
        #     # may well not need it, depending on your setup. Alternatively you
        #     # may need a whole lot more detail - see the pysaml2 docs!
        #
        #     description: ["My awesome SP", "en"]
        #     name: ["Test SP", "en"]
        #
        #     organization:
        #       name: Example com
        #       display_name:
        #         - ["Example co", "en"]
        #       url: "http://example.com"
        #
        #     contact_person:
        #       - given_name: Bob
        #         sur_name: "the Sysadmin"
        #         email_address": ["admin@example.com"]
        #         contact_type": technical
        #
        #   # Instead of putting the config inline as above, you can specify a
        #   # separate pysaml2 configuration file:
        #   #
        #   # config_path: "%(config_dir_path)s/sp_conf.py"
        """ % {"config_dir_path": config_dir_path}

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
# Copyright 2017-2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,10 @@
# limitations under the License.

import logging
import os.path

from synapse.http.endpoint import parse_and_validate_server_name
from synapse.python_dependencies import DependencyException, check_requirements

from ._base import Config, ConfigError

@@ -203,7 +205,9 @@ class ServerConfig(Config):
            ]
        })

    def default_config(self, server_name, **kwargs):
        _check_resource_config(self.listeners)

    def default_config(self, server_name, data_dir_path, **kwargs):
        _, bind_port = parse_and_validate_server_name(server_name)
        if bind_port is not None:
            unsecure_port = bind_port - 400
@@ -211,7 +215,7 @@ class ServerConfig(Config):
            bind_port = 8448
            unsecure_port = 8008

        pid_file = self.abspath("homeserver.pid")
        pid_file = os.path.join(data_dir_path, "homeserver.pid")
        return """\
        ## Server ##

@@ -252,8 +256,12 @@ class ServerConfig(Config):
        #
        # web_client_location: "/path/to/web/root"

        # The public-facing base URL for the client API (not including _matrix/...)
        # public_baseurl: https://example.com:8448/
        # The public-facing base URL that clients use to access this HS
        # (not including _matrix/...). This is the same URL a user would
        # enter into the 'custom HS URL' field on their client. If you
        # use synapse with a reverse proxy, this should be the URL to reach
        # synapse via the proxy.
        # public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use
        # Zero is used to indicate synapse should set the soft limit to the
@@ -356,41 +364,41 @@ class ServerConfig(Config):
        #   type: manhole


        # Homeserver blocking
        #
        # How to reach the server admin, used in ResourceLimitError
        # admin_contact: 'mailto:admin@server.com'
        #
        # Global block config
        #
        # hs_disabled: False
        # hs_disabled_message: 'Human readable reason for why the HS is blocked'
        # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
        #
        # Monthly Active User Blocking
        #
        # Enables monthly active user checking
        # limit_usage_by_mau: False
        # max_mau_value: 50
        # mau_trial_days: 2
        #
        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        # mau_stats_only: False
        #
        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        # mau_limit_reserved_threepids:
        # - medium: 'email'
        #   address: 'reserved_user@example.com'
        #
        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        # enable_search: true
        # Homeserver blocking
        #
        # How to reach the server admin, used in ResourceLimitError
        # admin_contact: 'mailto:admin@server.com'
        #
        # Global block config
        #
        # hs_disabled: False
        # hs_disabled_message: 'Human readable reason for why the HS is blocked'
        # hs_disabled_limit_type: 'error code(str), to help clients decode reason'
        #
        # Monthly Active User Blocking
        #
        # Enables monthly active user checking
        # limit_usage_by_mau: False
        # max_mau_value: 50
        # mau_trial_days: 2
        #
        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        # mau_stats_only: False
        #
        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        # mau_limit_reserved_threepids:
        # - medium: 'email'
        #   address: 'reserved_user@example.com'
        #
        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        # enable_search: true
        """ % locals()

    def read_arguments(self, args):
@@ -416,19 +424,18 @@ class ServerConfig(Config):
                          " service on the given port.")


def is_threepid_reserved(config, threepid):
def is_threepid_reserved(reserved_threepids, threepid):
    """Check the threepid against the reserved threepid config
    Args:
        config(ServerConfig) - to access server config attributes
        reserved_threepids([dict]) - list of reserved threepids
        threepid(dict) - The threepid to test for

    Returns:
        boolean Is the threepid undertest reserved_user
    """

    for tp in config.mau_limits_reserved_threepids:
        if (threepid['medium'] == tp['medium']
                and threepid['address'] == tp['address']):
    for tp in reserved_threepids:
        if (threepid['medium'] == tp['medium'] and threepid['address'] == tp['address']):
            return True
    return False
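
`is_threepid_reserved` now takes the reserved-threepid list directly instead of the whole `ServerConfig`, which makes it trivially testable. A self-contained example, with the function restated so the snippet runs standalone:

    def is_threepid_reserved(reserved_threepids, threepid):
        for tp in reserved_threepids:
            if (threepid['medium'] == tp['medium']
                    and threepid['address'] == tp['address']):
                return True
        return False

    reserved = [{'medium': 'email', 'address': 'reserved_user@example.com'}]
    assert is_threepid_reserved(
        reserved, {'medium': 'email', 'address': 'reserved_user@example.com'})
    assert not is_threepid_reserved(
        reserved, {'medium': 'email', 'address': 'someone_else@example.com'})
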

@@ -464,3 +471,36 @@ def _warn_if_webclient_configured(listeners):
            if name == 'webclient':
                logger.warning(NO_MORE_WEB_CLIENT_WARNING)
                return


KNOWN_RESOURCES = (
    'client',
    'consent',
    'federation',
    'keys',
    'media',
    'metrics',
    'replication',
    'static',
    'webclient',
)


def _check_resource_config(listeners):
    resource_names = set(
        res_name
        for listener in listeners
        for res in listener.get("resources", [])
        for res_name in res.get("names", [])
    )

    for resource in resource_names:
        if resource not in KNOWN_RESOURCES:
            raise ConfigError(
                "Unknown listener resource '%s'" % (resource, )
            )
        if resource == "consent":
            try:
                check_requirements('resources.consent')
            except DependencyException as e:
                raise ConfigError(e.message)

@@ -13,52 +13,42 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import subprocess
import warnings
from datetime import datetime
from hashlib import sha256

from unpaddedbase64 import encode_base64

from OpenSSL import crypto

from ._base import Config
from synapse.config._base import Config

GENERATE_DH_PARAMS = False
logger = logging.getLogger()


class TlsConfig(Config):
    def read_config(self, config):
        self.tls_certificate = self.read_tls_certificate(
            config.get("tls_certificate_path")
        )
        self.tls_certificate_file = config.get("tls_certificate_path")

        acme_config = config.get("acme", None)
        if acme_config is None:
            acme_config = {}

        self.acme_enabled = acme_config.get("enabled", False)
        self.acme_url = acme_config.get(
            "url", "https://acme-v01.api.letsencrypt.org/directory"
        )
        self.acme_port = acme_config.get("port", 80)
        self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
        self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)

        self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
        self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
        self._original_tls_fingerprints = config["tls_fingerprints"]
        self.tls_fingerprints = list(self._original_tls_fingerprints)
        self.no_tls = config.get("no_tls", False)

        if self.no_tls:
            self.tls_private_key = None
        else:
            self.tls_private_key = self.read_tls_private_key(
                config.get("tls_private_key_path")
            )

        self.tls_dh_params_path = self.check_file(
            config.get("tls_dh_params_path"), "tls_dh_params"
        )

        self.tls_fingerprints = config["tls_fingerprints"]

        # Check that our own certificate is included in the list of fingerprints
        # and include it if it is not.
        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1,
            self.tls_certificate
        )
        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
        if sha256_fingerprint not in sha256_fingerprints:
            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})

        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
        # It should never be used in production, and is intended for
@@ -67,29 +57,152 @@ class TlsConfig(Config):
            "use_insecure_ssl_client_just_for_testing_do_not_use"
        )

        self.tls_certificate = None
        self.tls_private_key = None

    def is_disk_cert_valid(self):
        """
        Is the certificate we have on disk valid, and if so, for how long?

        Returns:
            int: Days remaining of certificate validity.
            None: No certificate exists.
        """
        if not os.path.exists(self.tls_certificate_file):
            return None

        try:
            with open(self.tls_certificate_file, 'rb') as f:
                cert_pem = f.read()
        except Exception:
            logger.exception("Failed to read existing certificate off disk!")
            raise

        try:
            tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
        except Exception:
            logger.exception("Failed to parse existing certificate off disk!")
            raise

        # YYYYMMDDhhmmssZ -- in UTC
        expires_on = datetime.strptime(
            tls_certificate.get_notAfter().decode('ascii'), "%Y%m%d%H%M%SZ"
        )
        now = datetime.utcnow()
        days_remaining = (expires_on - now).days
        return days_remaining
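
The expiry parse relies on OpenSSL's `notAfter` field being in `YYYYMMDDhhmmssZ` form (UTC). A self-contained check of just the date arithmetic; the timestamp string is a made-up example:

    from datetime import datetime

    not_after = b"20190301120000Z"  # hypothetical notAfter value
    expires_on = datetime.strptime(not_after.decode("ascii"), "%Y%m%d%H%M%SZ")
    days_remaining = (expires_on - datetime.utcnow()).days
    # Negative once the certificate has expired; the caller compares this
    # value against acme_reprovision_threshold to decide whether to renew.
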

    def read_certificate_from_disk(self):
        """
        Read the certificates from disk.
        """
        self.tls_certificate = self.read_tls_certificate(self.tls_certificate_file)

        # Check if it is self-signed, and issue a warning if so.
        if self.tls_certificate.get_issuer() == self.tls_certificate.get_subject():
            warnings.warn(
                (
                    "Self-signed TLS certificates will not be accepted by Synapse 1.0. "
                    "Please either provide a valid certificate, or use Synapse's ACME "
                    "support to provision one."
                )
            )

        if not self.no_tls:
            self.tls_private_key = self.read_tls_private_key(self.tls_private_key_file)

        self.tls_fingerprints = list(self._original_tls_fingerprints)

        # Check that our own certificate is included in the list of fingerprints
        # and include it if it is not.
        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1, self.tls_certificate
        )
        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
        if sha256_fingerprint not in sha256_fingerprints:
            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
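The fingerprint published alongside the signing keys is the unpadded-base64 SHA-256 digest of the certificate's DER (ASN.1) encoding. A self-contained sketch of the same computation for a PEM certificate on disk (the path is illustrative):

```python
from hashlib import sha256

from OpenSSL import crypto
from unpaddedbase64 import encode_base64

with open("/path/to/tls.crt", "rb") as f:          # path is illustrative
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
print(encode_base64(sha256(der_bytes).digest()))   # the published "sha256" value
```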

    def default_config(self, config_dir_path, server_name, **kwargs):
        base_key_name = os.path.join(config_dir_path, server_name)

        tls_certificate_path = base_key_name + ".tls.crt"
        tls_private_key_path = base_key_name + ".tls.key"
        tls_dh_params_path = base_key_name + ".tls.dh"

        return """\
        # PEM encoded X509 certificate for TLS.
        # You can replace the self-signed certificate that synapse
        # autogenerates on launch with your own SSL certificate + key pair
        # if you like. Any required intermediary certificates can be
        # appended after the primary certificate in hierarchical order.
        # this is to avoid the max line length. Sorrynotsorry
        proxypassline = (
            'ProxyPass /.well-known/acme-challenge '
            'http://localhost:8009/.well-known/acme-challenge'
        )

        return (
            """\
        # PEM-encoded X509 certificate for TLS.
        # This certificate, as of Synapse 1.0, will need to be a valid and verifiable
        # certificate, signed by a recognised Certificate Authority.
        #
        # See 'ACME support' below to enable auto-provisioning this certificate via
        # Let's Encrypt.
        #
        tls_certificate_path: "%(tls_certificate_path)s"

        # PEM encoded private key for TLS
        # PEM-encoded private key for TLS
        tls_private_key_path: "%(tls_private_key_path)s"

        # PEM dh parameters for ephemeral keys
        tls_dh_params_path: "%(tls_dh_params_path)s"
        # ACME support: This will configure Synapse to request a valid TLS certificate
        # for your configured `server_name` via Let's Encrypt.
        #
        # Note that provisioning a certificate in this way requires port 80 to be
        # routed to Synapse so that it can complete the http-01 ACME challenge.
        # By default, if you enable ACME support, Synapse will attempt to listen on
        # port 80 for incoming http-01 challenges - however, this will likely fail
        # with 'Permission denied' or a similar error.
        #
        # There are a couple of potential solutions to this:
        #
        #  * If you already have an Apache, Nginx, or similar listening on port 80,
        #    you can configure Synapse to use an alternate port, and have your web
        #    server forward the requests. For example, assuming you set 'port: 8009'
        #    below, on Apache, you would write:
        #
        #    %(proxypassline)s
        #
        #  * Alternatively, you can use something like `authbind` to give Synapse
        #    permission to listen on port 80.
        #
        acme:
            # ACME support is disabled by default. Uncomment the following line
            # to enable it.
            #
            # enabled: true

        # Don't bind to the https port
        no_tls: False
            # Endpoint to use to request certificates. If you only want to test,
            # use Let's Encrypt's staging url:
            #     https://acme-staging.api.letsencrypt.org/directory
            #
            # url: https://acme-v01.api.letsencrypt.org/directory

            # Port number to listen on for the HTTP-01 challenge. Change this if
            # you are forwarding connections through Apache/Nginx/etc.
            #
            # port: 80

            # Local addresses to listen on for incoming connections.
            # Again, you may want to change this if you are forwarding connections
            # through Apache/Nginx/etc.
            #
            # bind_addresses: ['::', '0.0.0.0']

            # How many days remaining on a certificate before it is renewed.
            #
            # reprovision_threshold: 30

        # If your server runs behind a reverse-proxy which terminates TLS connections
        # (for both client and federation connections), it may be useful to disable
        # all TLS support for incoming connections. Setting no_tls to True will
        # do so (and avoid the need to give synapse a TLS private key).
        #
        # no_tls: False

        # List of allowed TLS fingerprints for this server to publish along
        # with the signing keys for this server. Other matrix servers that
@@ -118,7 +231,10 @@ class TlsConfig(Config):
        #
        tls_fingerprints: []
        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """ % locals()

            """
            % locals()
        )
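The template relies on `%`-interpolation against `locals()`, so names defined in the function body (`tls_certificate_path`, `proxypassline`, and so on) are substituted into the generated YAML by name. A minimal sketch of the same trick, with illustrative paths:

```python
def default_config(config_dir_path, server_name):
    base_key_name = config_dir_path + "/" + server_name
    tls_certificate_path = base_key_name + ".tls.crt"
    # locals() contains tls_certificate_path, so "%(tls_certificate_path)s"
    # is filled in by name; unused locals are simply ignored.
    return (
        'tls_certificate_path: "%(tls_certificate_path)s"\n'
        % locals()
    )


print(default_config("/etc/synapse", "example.com"))
# tls_certificate_path: "/etc/synapse/example.com.tls.crt"
```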

    def read_tls_certificate(self, cert_path):
        cert_pem = self.read_file(cert_path, "tls_certificate")
@@ -127,69 +243,3 @@ class TlsConfig(Config):
    def read_tls_private_key(self, private_key_path):
        private_key_pem = self.read_file(private_key_path, "tls_private_key")
        return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)

    def generate_files(self, config):
        tls_certificate_path = config["tls_certificate_path"]
        tls_private_key_path = config["tls_private_key_path"]
        tls_dh_params_path = config["tls_dh_params_path"]

        if not self.path_exists(tls_private_key_path):
            with open(tls_private_key_path, "wb") as private_key_file:
                tls_private_key = crypto.PKey()
                tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
                private_key_pem = crypto.dump_privatekey(
                    crypto.FILETYPE_PEM, tls_private_key
                )
                private_key_file.write(private_key_pem)
        else:
            with open(tls_private_key_path) as private_key_file:
                private_key_pem = private_key_file.read()
                tls_private_key = crypto.load_privatekey(
                    crypto.FILETYPE_PEM, private_key_pem
                )

        if not self.path_exists(tls_certificate_path):
            with open(tls_certificate_path, "wb") as certificate_file:
                cert = crypto.X509()
                subject = cert.get_subject()
                subject.CN = config["server_name"]

                cert.set_serial_number(1000)
                cert.gmtime_adj_notBefore(0)
                cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
                cert.set_issuer(cert.get_subject())
                cert.set_pubkey(tls_private_key)

                cert.sign(tls_private_key, 'sha256')

                cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)

                certificate_file.write(cert_pem)

        if not self.path_exists(tls_dh_params_path):
            if GENERATE_DH_PARAMS:
                subprocess.check_call([
                    "openssl", "dhparam",
                    "-outform", "PEM",
                    "-out", tls_dh_params_path,
                    "2048"
                ])
            else:
                with open(tls_dh_params_path, "w") as dh_params_file:
                    dh_params_file.write(
                        "2048-bit DH parameters taken from rfc3526\n"
                        "-----BEGIN DH PARAMETERS-----\n"
                        "MIIBCAKCAQEA///////////JD9qiIWjC"
                        "NMTGYouA3BzRKQJOCIpnzHQCC76mOxOb\n"
                        "IlFKCHmONATd75UZs806QxswKwpt8l8U"
                        "N0/hNW1tUcJF5IW1dmJefsb0TELppjft\n"
                        "awv/XLb0Brft7jhr+1qJn6WunyQRfEsf"
                        "5kkoZlHs5Fs9wgB8uKFjvwWY2kg2HFXT\n"
                        "mmkWP6j9JM9fg2VdI9yjrZYcYvNWIIVS"
                        "u57VKQdwlpZtZww1Tkq8mATxdGwIyhgh\n"
                        "fDKQXkYuNs474553LBgOhgObJ4Oi7Aei"
                        "j7XFXfBvTFLJ3ivL9pVYFxg5lUl86pVq\n"
                        "5RXSJhiY+gUQFXKOWoqsqmj/////////"
                        "/wIBAg==\n"
                        "-----END DH PARAMETERS-----\n"
                    )

@@ -17,6 +17,7 @@ from zope.interface import implementer

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.python.failure import Failure
@@ -46,8 +47,10 @@ class ServerContextFactory(ContextFactory):
        if not config.no_tls:
            context.use_privatekey(config.tls_private_key)

        context.load_tmp_dh(config.tls_dh_params_path)
        context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
        # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
        context.set_cipher_list(
            "ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1"
        )

    def getContext(self):
        return self._context
@@ -96,8 +99,14 @@ class ClientTLSOptions(object):

    def __init__(self, hostname, ctx):
        self._ctx = ctx
        self._hostname = hostname
        self._hostnameBytes = _idnaBytes(hostname)

        if isIPAddress(hostname) or isIPv6Address(hostname):
            self._hostnameBytes = hostname.encode('ascii')
            self._sendSNI = False
        else:
            self._hostnameBytes = _idnaBytes(hostname)
            self._sendSNI = True

        ctx.set_info_callback(
            _tolerateErrors(self._identityVerifyingInfoCallback)
        )
@@ -109,7 +118,9 @@ class ClientTLSOptions(object):
        return connection

    def _identityVerifyingInfoCallback(self, connection, where, ret):
        if where & SSL.SSL_CB_HANDSHAKE_START:
            # Literal IPv4 and IPv6 addresses are not permitted
            # as host names according to the RFCs
        if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
            connection.set_tlsext_host_name(self._hostnameBytes)



@@ -23,14 +23,14 @@ from signedjson.sign import sign_json
from unpaddedbase64 import decode_base64, encode_base64

from synapse.api.errors import Codes, SynapseError
from synapse.events.utils import prune_event
from synapse.events.utils import prune_event, prune_event_dict

logger = logging.getLogger(__name__)


def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
    """Check whether the hash for this PDU matches the contents"""
    name, expected_hash = compute_content_hash(event, hash_algorithm)
    name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
    logger.debug("Expecting hash: %s", encode_base64(expected_hash))

    # some malformed events lack a 'hashes'. Protect against it being missing
@@ -59,35 +59,70 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
    return message_hash_bytes == expected_hash


def compute_content_hash(event, hash_algorithm):
    event_json = event.get_pdu_json()
    event_json.pop("age_ts", None)
    event_json.pop("unsigned", None)
    event_json.pop("signatures", None)
    event_json.pop("hashes", None)
    event_json.pop("outlier", None)
    event_json.pop("destinations", None)
def compute_content_hash(event_dict, hash_algorithm):
    """Compute the content hash of an event, which is the hash of the
    unredacted event.

    event_json_bytes = encode_canonical_json(event_json)
    Args:
        event_dict (dict): The unredacted event as a dict
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event

    Returns:
        tuple[str, bytes]: A tuple of the name of hash and the hash as raw
            bytes.
    """
    event_dict = dict(event_dict)
    event_dict.pop("age_ts", None)
    event_dict.pop("unsigned", None)
    event_dict.pop("signatures", None)
    event_dict.pop("hashes", None)
    event_dict.pop("outlier", None)
    event_dict.pop("destinations", None)

    event_json_bytes = encode_canonical_json(event_dict)

    hashed = hash_algorithm(event_json_bytes)
    return (hashed.name, hashed.digest())
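Since `compute_content_hash` now takes a plain dict, it can be exercised directly. A minimal usage sketch, assuming the module above is importable as `synapse.crypto.event_signing` (the event dict itself is made up):

```python
import hashlib

from synapse.crypto.event_signing import compute_content_hash

event_dict = {
    "type": "m.room.message",
    "room_id": "!room:example.com",           # illustrative IDs
    "sender": "@alice:example.com",
    "content": {"body": "hello"},
    "unsigned": {"age_ts": 1000},             # stripped before hashing
}

name, digest = compute_content_hash(event_dict, hashlib.sha256)
print(name)         # "sha256"
print(len(digest))  # 32 raw bytes; callers base64-encode as needed
```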


def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
    """Computes the event reference hash. This is the hash of the redacted
    event.

    Args:
        event (FrozenEvent)
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event

    Returns:
        tuple[str, bytes]: A tuple of the name of hash and the hash as raw
            bytes.
    """
    tmp_event = prune_event(event)
    event_json = tmp_event.get_pdu_json()
    event_json.pop("signatures", None)
    event_json.pop("age_ts", None)
    event_json.pop("unsigned", None)
    event_json_bytes = encode_canonical_json(event_json)
    event_dict = tmp_event.get_pdu_json()
    event_dict.pop("signatures", None)
    event_dict.pop("age_ts", None)
    event_dict.pop("unsigned", None)
    event_json_bytes = encode_canonical_json(event_dict)
    hashed = hash_algorithm(event_json_bytes)
    return (hashed.name, hashed.digest())


def compute_event_signature(event, signature_name, signing_key):
    tmp_event = prune_event(event)
    redact_json = tmp_event.get_pdu_json()
def compute_event_signature(event_dict, signature_name, signing_key):
    """Compute the signature of the event for the given name and key.

    Args:
        event_dict (dict): The event as a dict
        signature_name (str): The name of the entity signing the event
            (typically the server's hostname).
        signing_key (syutil.crypto.SigningKey): The key to sign with

    Returns:
        dict[str, dict[str, str]]: Returns a dictionary in the same format of
            an event's signatures field.
    """
    redact_json = prune_event_dict(event_dict)
    redact_json.pop("age_ts", None)
    redact_json.pop("unsigned", None)
    logger.debug("Signing event: %s", encode_canonical_json(redact_json))
@@ -96,25 +131,25 @@ def compute_event_signature(event, signature_name, signing_key):
    return redact_json["signatures"]


def add_hashes_and_signatures(event, signature_name, signing_key,
def add_hashes_and_signatures(event_dict, signature_name, signing_key,
                              hash_algorithm=hashlib.sha256):
    # if hasattr(event, "old_state_events"):
    #     state_json_bytes = encode_canonical_json(
    #         [e.event_id for e in event.old_state_events.values()]
    #     )
    #     hashed = hash_algorithm(state_json_bytes)
    #     event.state_hash = {
    #         hashed.name: encode_base64(hashed.digest())
    #     }
    """Add content hash and sign the event

    name, digest = compute_content_hash(event, hash_algorithm=hash_algorithm)
    Args:
        event_dict (dict): The event to add hashes to and sign
        signature_name (str): The name of the entity signing the event
            (typically the server's hostname).
        signing_key (syutil.crypto.SigningKey): The key to sign with
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event
    """

    if not hasattr(event, "hashes"):
        event.hashes = {}
    event.hashes[name] = encode_base64(digest)
    name, digest = compute_content_hash(event_dict, hash_algorithm=hash_algorithm)

    event.signatures = compute_event_signature(
        event,
    event_dict.setdefault("hashes", {})[name] = encode_base64(digest)

    event_dict["signatures"] = compute_event_signature(
        event_dict,
        signature_name=signature_name,
        signing_key=signing_key,
    )
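End to end, the dict-based API lets a caller hash and sign an event without constructing an event object first. A minimal sketch, assuming the module above plus the `signedjson` key helpers it already relies on (the key version and IDs are illustrative):

```python
from signedjson.key import generate_signing_key

from synapse.crypto.event_signing import add_hashes_and_signatures

signing_key = generate_signing_key("key1")     # key version is illustrative

event_dict = {
    "type": "m.room.message",
    "room_id": "!room:example.com",            # illustrative IDs
    "sender": "@alice:example.com",
    "content": {"body": "hello"},
}

add_hashes_and_signatures(event_dict, "example.com", signing_key)
# event_dict is mutated in place: it now carries "hashes"
# ({"sha256": ...}) and "signatures" ({"example.com": {"ed25519:key1": ...}}).
```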

@@ -1,149 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from six.moves import urllib

from canonicaljson import json

from twisted.internet import defer, reactor
from twisted.internet.error import ConnectError
from twisted.internet.protocol import Factory
from twisted.names.error import DomainError
from twisted.web.http import HTTPClient

from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util import logcontext

logger = logging.getLogger(__name__)

KEY_API_V2 = "/_matrix/key/v2/server/%s"


@defer.inlineCallbacks
def fetch_server_key(server_name, tls_client_options_factory, key_id):
    """Fetch the keys for a remote server."""

    factory = SynapseKeyClientFactory()
    factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
    factory.host = server_name
    endpoint = matrix_federation_endpoint(
        reactor, server_name, tls_client_options_factory, timeout=30
    )

    for i in range(5):
        try:
            with logcontext.PreserveLoggingContext():
                protocol = yield endpoint.connect(factory)
                server_response, server_certificate = yield protocol.remote_key
                defer.returnValue((server_response, server_certificate))
        except SynapseKeyClientError as e:
            logger.warn("Error getting key for %r: %s", server_name, e)
            if e.status.startswith(b"4"):
                # Don't retry for 4xx responses.
                raise IOError("Cannot get key for %r" % server_name)
        except (ConnectError, DomainError) as e:
            logger.warn("Error getting key for %r: %s", server_name, e)
        except Exception:
            logger.exception("Error getting key for %r", server_name)
    raise IOError("Cannot get key for %r" % server_name)


class SynapseKeyClientError(Exception):
    """The key wasn't retrieved from the remote server."""
    status = None
    pass


class SynapseKeyClientProtocol(HTTPClient):
    """Low level HTTPS client which retrieves an application/json response from
    the server and extracts the X.509 certificate for the remote peer from the
    SSL connection."""

    timeout = 30

    def __init__(self):
        self.remote_key = defer.Deferred()
        self.host = None
        self._peer = None

    def connectionMade(self):
        self._peer = self.transport.getPeer()
        logger.debug("Connected to %s", self._peer)

        if not isinstance(self.path, bytes):
            self.path = self.path.encode('ascii')

        if not isinstance(self.host, bytes):
            self.host = self.host.encode('ascii')

        self.sendCommand(b"GET", self.path)
        if self.host:
            self.sendHeader(b"Host", self.host)
        self.endHeaders()
        self.timer = reactor.callLater(
            self.timeout,
            self.on_timeout
        )

    def errback(self, error):
        if not self.remote_key.called:
            self.remote_key.errback(error)

    def callback(self, result):
        if not self.remote_key.called:
            self.remote_key.callback(result)

    def handleStatus(self, version, status, message):
        if status != b"200":
            # logger.info("Non-200 response from %s: %s %s",
            #             self.transport.getHost(), status, message)
            error = SynapseKeyClientError(
                "Non-200 response %r from %r" % (status, self.host)
            )
            error.status = status
            self.errback(error)
            self.transport.abortConnection()

    def handleResponse(self, response_body_bytes):
        try:
            json_response = json.loads(response_body_bytes)
        except ValueError:
            # logger.info("Invalid JSON response from %s",
            #             self.transport.getHost())
            self.transport.abortConnection()
            return

        certificate = self.transport.getPeerCertificate()
        self.callback((json_response, certificate))
        self.transport.abortConnection()
        self.timer.cancel()

    def on_timeout(self):
        logger.debug(
            "Timeout waiting for response from %s: %s",
            self.host, self._peer,
        )
        self.errback(IOError("Timeout waiting for response"))
        self.transport.abortConnection()


class SynapseKeyClientFactory(Factory):
    def protocol(self):
        protocol = SynapseKeyClientProtocol()
        protocol.path = self.path
        protocol.host = self.host
        return protocol
@@ -14,10 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib
import logging
from collections import namedtuple

from six.moves import urllib

from signedjson.key import (
    decode_verify_key_bytes,
    encode_verify_key_base64,
@@ -30,13 +31,11 @@ from signedjson.sign import (
    signature_ids,
    verify_signed_json,
)
from unpaddedbase64 import decode_base64, encode_base64
from unpaddedbase64 import decode_base64

from OpenSSL import crypto
from twisted.internet import defer

from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyclient import fetch_server_key
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
    LoggingContext,
@@ -503,31 +502,16 @@ class Keyring(object):
            if requested_key_id in keys:
                continue

            (response, tls_certificate) = yield fetch_server_key(
                server_name, self.hs.tls_client_options_factory, requested_key_id
            response = yield self.client.get_json(
                destination=server_name,
                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
                ignore_backoff=True,
            )

            if (u"signatures" not in response
                    or server_name not in response[u"signatures"]):
                raise KeyLookupError("Key response not signed by remote server")

            if "tls_fingerprints" not in response:
                raise KeyLookupError("Key response missing TLS fingerprints")

            certificate_bytes = crypto.dump_certificate(
                crypto.FILETYPE_ASN1, tls_certificate
            )
            sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
            sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)

            response_sha256_fingerprints = set()
            for fingerprint in response[u"tls_fingerprints"]:
                if u"sha256" in fingerprint:
                    response_sha256_fingerprints.add(fingerprint[u"sha256"])

            if sha256_fingerprint_b64 not in response_sha256_fingerprints:
                raise KeyLookupError("TLS certificate not allowed by fingerprints")

            response_keys = yield self.process_v2_response(
                from_server=server_name,
                requested_ids=[requested_key_id],

@@ -20,17 +20,25 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64

from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership
from synapse.api.constants import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
    EventTypes,
    JoinRules,
    Membership,
    RoomVersions,
)
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.types import UserID, get_domain_from_id

logger = logging.getLogger(__name__)


def check(event, auth_events, do_sig_check=True, do_size_check=True):
def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True):
    """ Checks if this event is correctly authed.

    Args:
        room_version (str): the version of the room
        event: the event being checked.
        auth_events (dict: event-key -> event): the existing room state.

@@ -48,7 +56,6 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):

    if do_sig_check:
        sender_domain = get_domain_from_id(event.sender)
        event_id_domain = get_domain_from_id(event.event_id)

        is_invite_via_3pid = (
            event.type == EventTypes.Member
@@ -65,9 +72,13 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
        if not is_invite_via_3pid:
            raise AuthError(403, "Event not signed by sender's server")

        # Check the event_id's domain has signed the event
        if not event.signatures.get(event_id_domain):
            raise AuthError(403, "Event not signed by sending server")
        if event.format_version in (EventFormatVersions.V1,):
            # Only older room versions have event IDs to check.
            event_id_domain = get_domain_from_id(event.event_id)

            # Check the origin domain has signed the event
            if not event.signatures.get(event_id_domain):
                raise AuthError(403, "Event not signed by sending server")

    if auth_events is None:
        # Oh, we don't know what the state of the room was, so we
@@ -167,7 +178,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
        _check_power_levels(event, auth_events)

    if event.type == EventTypes.Redaction:
        check_redaction(event, auth_events)
        check_redaction(room_version, event, auth_events)

    logger.debug("Allowing! %s", event)

@@ -421,7 +432,7 @@ def _can_send_event(event, auth_events):
    return True


def check_redaction(event, auth_events):
def check_redaction(room_version, event, auth_events):
    """Check whether the event sender is allowed to redact the target event.

    Returns:
@@ -441,10 +452,16 @@ def check_redaction(event, auth_events):
    if user_level >= redact_level:
        return False

    redacter_domain = get_domain_from_id(event.event_id)
    redactee_domain = get_domain_from_id(event.redacts)
    if redacter_domain == redactee_domain:
    if room_version in (RoomVersions.V1, RoomVersions.V2,):
        redacter_domain = get_domain_from_id(event.event_id)
        redactee_domain = get_domain_from_id(event.redacts)
        if redacter_domain == redactee_domain:
            return True
    elif room_version == RoomVersions.V3:
        event.internal_metadata.recheck_redaction = True
        return True
    else:
        raise RuntimeError("Unrecognized room version %r" % (room_version,))

    raise AuthError(
        403,

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +19,9 @@ from distutils.util import strtobool

import six

from unpaddedbase64 import encode_base64

from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventFormatVersions, RoomVersions
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze

@@ -41,8 +45,13 @@ class _EventInternalMetadata(object):
    def is_outlier(self):
        return getattr(self, "outlier", False)

    def is_invite_from_remote(self):
        return getattr(self, "invite_from_remote", False)
    def is_out_of_band_membership(self):
        """Whether this is an out of band membership, like an invite or an invite
        rejection. This is needed as those events are marked as outliers, but
        they still need to be processed as if they're new events (e.g. updating
        invite state in the database, relaying to clients, etc).
        """
        return getattr(self, "out_of_band_membership", False)

    def get_send_on_behalf_of(self):
        """Whether this server should send the event on behalf of another server.
@@ -53,6 +62,21 @@ class _EventInternalMetadata(object):
        """
        return getattr(self, "send_on_behalf_of", None)

    def need_to_check_redaction(self):
        """Whether the redaction event needs to be rechecked when fetching
        from the database.

        Starting in room v3 redaction events are accepted up front, and later
        checked to see if the redacter and redactee's domains match.

        If the sender of the redaction event is allowed to redact any event
        due to auth rules, then this will always return false.

        Returns:
            bool
        """
        return getattr(self, "recheck_redaction", False)


def _event_dict_property(key):
    # We want to be able to use hasattr with the event dict properties.
@@ -179,6 +203,8 @@ class EventBase(object):


class FrozenEvent(EventBase):
    format_version = EventFormatVersions.V1  # All events of this type are V1

    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
        event_dict = dict(event_dict)

@@ -213,16 +239,6 @@ class FrozenEvent(EventBase):
            rejected_reason=rejected_reason,
        )

    @staticmethod
    def from_event(event):
        e = FrozenEvent(
            event.get_pdu_json()
        )

        e.internal_metadata = event.internal_metadata

        return e

    def __str__(self):
        return self.__repr__()

@@ -232,3 +248,127 @@ class FrozenEvent(EventBase):
            self.get("type", None),
            self.get("state_key", None),
        )


class FrozenEventV2(EventBase):
    format_version = EventFormatVersions.V2  # All events of this type are V2

    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
        event_dict = dict(event_dict)

        # Signatures is a dict of dicts, and this is faster than doing a
        # copy.deepcopy
        signatures = {
            name: {sig_id: sig for sig_id, sig in sigs.items()}
            for name, sigs in event_dict.pop("signatures", {}).items()
        }

        assert "event_id" not in event_dict

        unsigned = dict(event_dict.pop("unsigned", {}))

        # We intern these strings because they turn up a lot (especially when
        # caching).
        event_dict = intern_dict(event_dict)

        if USE_FROZEN_DICTS:
            frozen_dict = freeze(event_dict)
        else:
            frozen_dict = event_dict

        self._event_id = None
        self.type = event_dict["type"]
        if "state_key" in event_dict:
            self.state_key = event_dict["state_key"]

        super(FrozenEventV2, self).__init__(
            frozen_dict,
            signatures=signatures,
            unsigned=unsigned,
            internal_metadata_dict=internal_metadata_dict,
            rejected_reason=rejected_reason,
        )

    @property
    def event_id(self):
        # We have to import this here as otherwise we get an import loop which
        # is hard to break.
        from synapse.crypto.event_signing import compute_event_reference_hash

        if self._event_id:
            return self._event_id
        self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1])
        return self._event_id

    def prev_event_ids(self):
        """Returns the list of prev event IDs. The order matches the order
        specified in the event, though there is no meaning to it.

        Returns:
            list[str]: The list of event IDs of this event's prev_events
        """
        return self.prev_events

    def auth_event_ids(self):
        """Returns the list of auth event IDs. The order matches the order
        specified in the event, though there is no meaning to it.

        Returns:
            list[str]: The list of event IDs of this event's auth_events
        """
        return self.auth_events

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
            self.event_id,
            self.get("type", None),
            self.get("state_key", None),
        )


def room_version_to_event_format(room_version):
    """Converts a room version string to the event format

    Args:
        room_version (str)

    Returns:
        int
    """
    if room_version not in KNOWN_ROOM_VERSIONS:
        # We should have already checked version, so this should not happen
        raise RuntimeError("Unrecognized room version %s" % (room_version,))

    if room_version in (
        RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
    ):
        return EventFormatVersions.V1
    elif room_version in (RoomVersions.V3,):
        return EventFormatVersions.V2
    else:
        raise RuntimeError("Unrecognized room version %s" % (room_version,))


def event_type_from_format_version(format_version):
    """Returns the python type to use to construct an Event object for the
    given event format version.

    Args:
        format_version (int): The event format version

    Returns:
        type: A type that can be initialized as per the initializer of
            `FrozenEvent`
    """

    if format_version == EventFormatVersions.V1:
        return FrozenEvent
    elif format_version == EventFormatVersions.V2:
        return FrozenEventV2
    else:
        raise Exception(
            "No event format %r" % (format_version,)
        )
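Together, these two helpers map a room version to the concrete event class. A minimal sketch of the intended call pattern, assuming the module above is importable as `synapse.events` (the event dict is illustrative):

```python
from synapse.api.constants import RoomVersions
from synapse.events import (
    event_type_from_format_version,
    room_version_to_event_format,
)

# Stands in for an event dict received over federation; note that V2-format
# events carry no "event_id" field.
pdu_json = {"type": "m.room.message", "content": {"body": "hi"}}

format_version = room_version_to_event_format(RoomVersions.V3)
event_type = event_type_from_format_version(format_version)  # -> FrozenEventV2
event = event_type(pdu_json)
```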

@@ -13,63 +13,270 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import attr

from twisted.internet import defer

from synapse.api.constants import (
    KNOWN_EVENT_FORMAT_VERSIONS,
    KNOWN_ROOM_VERSIONS,
    MAX_DEPTH,
    EventFormatVersions,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.types import EventID
from synapse.util.stringutils import random_string

from . import EventBase, FrozenEvent, _event_dict_property
from . import (
    _EventInternalMetadata,
    event_type_from_format_version,
    room_version_to_event_format,
)


class EventBuilder(EventBase):
    def __init__(self, key_values={}, internal_metadata_dict={}):
        signatures = copy.deepcopy(key_values.pop("signatures", {}))
        unsigned = copy.deepcopy(key_values.pop("unsigned", {}))
@attr.s(slots=True, cmp=False, frozen=True)
class EventBuilder(object):
    """A format independent event builder used to build up the event content
    before signing the event.

        super(EventBuilder, self).__init__(
            key_values,
            signatures=signatures,
            unsigned=unsigned,
            internal_metadata_dict=internal_metadata_dict,
    (Note that while objects of this class are frozen, the
    content/unsigned/internal_metadata fields are still mutable)

    Attributes:
        format_version (int): Event format version
        room_id (str)
        type (str)
        sender (str)
        content (dict)
        unsigned (dict)
        internal_metadata (_EventInternalMetadata)

        _state (StateHandler)
        _auth (synapse.api.Auth)
        _store (DataStore)
        _clock (Clock)
        _hostname (str): The hostname of the server creating the event
        _signing_key: The signing key to use to sign the event as the server
    """

    _state = attr.ib()
    _auth = attr.ib()
    _store = attr.ib()
    _clock = attr.ib()
    _hostname = attr.ib()
    _signing_key = attr.ib()

    format_version = attr.ib()

    room_id = attr.ib()
    type = attr.ib()
    sender = attr.ib()

    content = attr.ib(default=attr.Factory(dict))
    unsigned = attr.ib(default=attr.Factory(dict))

    # These only exist on a subset of events, so they raise AttributeError if
    # someone tries to get them when they don't exist.
    _state_key = attr.ib(default=None)
    _redacts = attr.ib(default=None)

    internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))

    @property
    def state_key(self):
        if self._state_key is not None:
            return self._state_key

        raise AttributeError("state_key")

    def is_state(self):
        return self._state_key is not None

    @defer.inlineCallbacks
    def build(self, prev_event_ids):
        """Transform into a fully signed and hashed event

        Args:
            prev_event_ids (list[str]): The event IDs to use as the prev events

        Returns:
            Deferred[FrozenEvent]
        """

        state_ids = yield self._state.get_current_state_ids(
            self.room_id, prev_event_ids,
        )
        auth_ids = yield self._auth.compute_auth_events(
            self, state_ids,
        )

    event_id = _event_dict_property("event_id")
    state_key = _event_dict_property("state_key")
    type = _event_dict_property("type")
        if self.format_version == EventFormatVersions.V1:
            auth_events = yield self._store.add_event_hashes(auth_ids)
            prev_events = yield self._store.add_event_hashes(prev_event_ids)
        else:
            auth_events = auth_ids
            prev_events = prev_event_ids

    def build(self):
        return FrozenEvent.from_event(self)
        old_depth = yield self._store.get_max_depth_of(
            prev_event_ids,
        )
        depth = old_depth + 1

        # we cap depth of generated events, to ensure that they are not
        # rejected by other servers (and so that they can be persisted in
        # the db)
        depth = min(depth, MAX_DEPTH)

        event_dict = {
            "auth_events": auth_events,
            "prev_events": prev_events,
            "type": self.type,
            "room_id": self.room_id,
            "sender": self.sender,
            "content": self.content,
            "unsigned": self.unsigned,
            "depth": depth,
            "prev_state": [],
        }

        if self.is_state():
            event_dict["state_key"] = self._state_key

        if self._redacts is not None:
            event_dict["redacts"] = self._redacts

        defer.returnValue(
            create_local_event_from_event_dict(
                clock=self._clock,
                hostname=self._hostname,
                signing_key=self._signing_key,
                format_version=self.format_version,
                event_dict=event_dict,
                internal_metadata_dict=self.internal_metadata.get_dict(),
            )
        )


class EventBuilderFactory(object):
    def __init__(self, clock, hostname):
        self.clock = clock
        self.hostname = hostname
    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.hostname = hs.hostname
        self.signing_key = hs.config.signing_key[0]

        self.event_id_count = 0
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.auth = hs.get_auth()

    def create_event_id(self):
        i = str(self.event_id_count)
        self.event_id_count += 1
    def new(self, room_version, key_values):
        """Generate an event builder appropriate for the given room version

        local_part = str(int(self.clock.time())) + i + random_string(5)
        Args:
            room_version (str): Version of the room that we're creating an
                event builder for
            key_values (dict): Fields used as the basis of the new event

        e_id = EventID(local_part, self.hostname)
        Returns:
            EventBuilder
        """

        return e_id.to_string()
        # There's currently only the one event version defined
        if room_version not in KNOWN_ROOM_VERSIONS:
            raise Exception(
                "No event format defined for version %r" % (room_version,)
            )

    def new(self, key_values={}):
        key_values["event_id"] = self.create_event_id()
        return EventBuilder(
            store=self.store,
            state=self.state,
            auth=self.auth,
            clock=self.clock,
            hostname=self.hostname,
            signing_key=self.signing_key,
            format_version=room_version_to_event_format(room_version),
            type=key_values["type"],
            state_key=key_values.get("state_key"),
            room_id=key_values["room_id"],
            sender=key_values["sender"],
            content=key_values.get("content", {}),
            unsigned=key_values.get("unsigned", {}),
            redacts=key_values.get("redacts", None),
        )

        time_now = int(self.clock.time_msec())

        key_values.setdefault("origin", self.hostname)
        key_values.setdefault("origin_server_ts", time_now)
def create_local_event_from_event_dict(clock, hostname, signing_key,
                                       format_version, event_dict,
                                       internal_metadata_dict=None):
    """Takes a fully formed event dict, ensuring that fields like `origin`
    and `origin_server_ts` have correct values for a locally produced event,
    then signs and hashes it.

        key_values.setdefault("unsigned", {})
        age = key_values["unsigned"].pop("age", 0)
        key_values["unsigned"].setdefault("age_ts", time_now - age)
    Args:
        clock (Clock)
        hostname (str)
        signing_key
        format_version (int)
        event_dict (dict)
        internal_metadata_dict (dict|None)

        key_values["signatures"] = {}
    Returns:
        FrozenEvent
    """

        return EventBuilder(key_values=key_values,)
    # There's currently only the one event version defined
    if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
        raise Exception(
            "No event format defined for version %r" % (format_version,)
        )

    if internal_metadata_dict is None:
        internal_metadata_dict = {}

    time_now = int(clock.time_msec())

    if format_version == EventFormatVersions.V1:
        event_dict["event_id"] = _create_event_id(clock, hostname)

    event_dict["origin"] = hostname
    event_dict["origin_server_ts"] = time_now

    event_dict.setdefault("unsigned", {})
    age = event_dict["unsigned"].pop("age", 0)
    event_dict["unsigned"].setdefault("age_ts", time_now - age)

    event_dict.setdefault("signatures", {})

    add_hashes_and_signatures(
        event_dict,
        hostname,
        signing_key,
    )
    return event_type_from_format_version(format_version)(
        event_dict, internal_metadata_dict=internal_metadata_dict,
    )


# A counter used when generating new event IDs
_event_id_counter = 0


def _create_event_id(clock, hostname):
    """Create a new event ID

    Args:
        clock (Clock)
        hostname (str): The server name for the event ID

    Returns:
        str
    """

    global _event_id_counter

    i = str(_event_id_counter)
    _event_id_counter += 1

    local_part = str(int(clock.time())) + i + random_string(5)

    e_id = EventID(local_part, hostname)

    return e_id.to_string()
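For V1-format events the generated ID is therefore wall-clock seconds, a process-local counter, and five random characters, qualified by the server name. A standalone sketch of the same construction (`_random_string` stands in for `synapse.util.stringutils.random_string`; the hostname is illustrative):

```python
import random
import string
import time


def _random_string(length):
    # stand-in for synapse.util.stringutils.random_string
    return "".join(random.choice(string.ascii_letters) for _ in range(length))


counter = 0
local_part = str(int(time.time())) + str(counter) + _random_string(5)
event_id = "$%s:%s" % (local_part, "example.com")
# e.g. "$15475529290AbCdE:example.com"
```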

@@ -38,8 +38,31 @@ def prune_event(event):
    This is used when we "redact" an event. We want to remove all fields that
    the user has specified, but we do want to keep necessary information like
    type, state_key etc.

    Args:
        event (FrozenEvent)

    Returns:
        FrozenEvent
    """
    pruned_event_dict = prune_event_dict(event.get_dict())

    from . import event_type_from_format_version
    return event_type_from_format_version(event.format_version)(
        pruned_event_dict, event.internal_metadata.get_dict()
    )


def prune_event_dict(event_dict):
    """Redacts the event_dict in the same way as `prune_event`, except it
    operates on dicts rather than event objects

    Args:
        event_dict (dict)

    Returns:
        dict: A copy of the pruned event dict
    """
    event_type = event.type

    allowed_keys = [
        "event_id",
@@ -59,13 +82,13 @@ def prune_event(event):
        "membership",
    ]

    event_dict = event.get_dict()
    event_type = event_dict["type"]

    new_content = {}

    def add_fields(*fields):
        for field in fields:
            if field in event.content:
            if field in event_dict["content"]:
                new_content[field] = event_dict["content"][field]

    if event_type == EventTypes.Member:
@@ -98,17 +121,17 @@ def prune_event(event):

    allowed_fields["content"] = new_content

    allowed_fields["unsigned"] = {}
    unsigned = {}
    allowed_fields["unsigned"] = unsigned

    if "age_ts" in event.unsigned:
        allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
    if "replaces_state" in event.unsigned:
        allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]
    event_unsigned = event_dict.get("unsigned", {})

    return type(event)(
        allowed_fields,
        internal_metadata_dict=event.internal_metadata.get_dict()
    )
    if "age_ts" in event_unsigned:
        unsigned["age_ts"] = event_unsigned["age_ts"]
    if "replaces_state" in event_unsigned:
        unsigned["replaces_state"] = event_unsigned["replaces_state"]

    return allowed_fields
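A quick illustration of what survives pruning, assuming the module above is importable as `synapse.events.utils` (the event content is made up):

```python
from synapse.events.utils import prune_event_dict

event_dict = {
    "event_id": "$abc:example.com",            # illustrative
    "type": "m.room.message",
    "room_id": "!room:example.com",
    "sender": "@alice:example.com",
    "content": {"body": "secret text"},
    "unsigned": {"age_ts": 1000, "other": 1},
}

pruned = prune_event_dict(event_dict)
# m.room.message keeps no content fields, so pruned["content"] == {};
# of "unsigned", only age_ts (and replaces_state, if set) survives.
```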


def _copy_field(src, dst, field):
@@ -244,6 +267,7 @@ def serialize_event(e, time_now_ms, as_client_event=True,
    Returns:
        dict
    """

    # FIXME(erikj): To handle the case of presence events and the like
    if not isinstance(e, EventBase):
        return e
@@ -253,6 +277,8 @@ def serialize_event(e, time_now_ms, as_client_event=True,
    # Should this strip out None's?
    d = {k: v for k, v in e.get_dict().items()}

    d["event_id"] = e.event_id

    if "age_ts" in d["unsigned"]:
        d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
        del d["unsigned"]["age_ts"]

@@ -15,23 +15,29 @@

from six import string_types

from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventFormatVersions, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.types import EventID, RoomID, UserID


class EventValidator(object):
    def validate_new(self, event):
        """Validates the event has roughly the right format

    def validate(self, event):
        EventID.from_string(event.event_id)
        RoomID.from_string(event.room_id)
        Args:
            event (FrozenEvent)
        """
        self.validate_builder(event)

        if event.format_version == EventFormatVersions.V1:
            EventID.from_string(event.event_id)

        required = [
            # "auth_events",
            "auth_events",
            "content",
            # "hashes",
            "hashes",
            "origin",
            # "prev_events",
            "prev_events",
            "sender",
            "type",
        ]
@@ -41,8 +47,25 @@ class EventValidator(object):
                raise SynapseError(400, "Event does not have key %s" % (k,))

        # Check that the following keys have string values
        strings = [
        event_strings = [
            "origin",
        ]

        for s in event_strings:
            if not isinstance(getattr(event, s), string_types):
                raise SynapseError(400, "'%s' not a string type" % (s,))

    def validate_builder(self, event):
        """Validates that the builder/event has roughly the right format. Only
        checks values that we expect a proto event to have, rather than all the
        fields an event would have

        Args:
            event (EventBuilder|FrozenEvent)
        """

        strings = [
            "room_id",
            "sender",
            "type",
        ]
@@ -54,22 +77,7 @@ class EventValidator(object):
            if not isinstance(getattr(event, s), string_types):
                raise SynapseError(400, "Not '%s' a string type" % (s,))

        if event.type == EventTypes.Member:
            if "membership" not in event.content:
                raise SynapseError(400, "Content has not membership key")

            if event.content["membership"] not in Membership.LIST:
                raise SynapseError(400, "Invalid membership key")

        # Check that the following keys have dictionary values
        # TODO

        # Check that the following keys have the correct format for DAGs
        # TODO

    def validate_new(self, event):
        self.validate(event)

        RoomID.from_string(event.room_id)
        UserID.from_string(event.sender)

        if event.type == EventTypes.Message:
@@ -86,9 +94,16 @@ class EventValidator(object):
        elif event.type == EventTypes.Name:
            self._ensure_strings(event.content, ["name"])

        elif event.type == EventTypes.Member:
            if "membership" not in event.content:
                raise SynapseError(400, "Content has not membership key")

            if event.content["membership"] not in Membership.LIST:
                raise SynapseError(400, "Invalid membership key")

    def _ensure_strings(self, d, keys):
        for s in keys:
            if s not in d:
                raise SynapseError(400, "'%s' not in content" % (s,))
            if not isinstance(d[s], string_types):
                raise SynapseError(400, "Not '%s' a string type" % (s,))
                raise SynapseError(400, "'%s' not a string type" % (s,))

@@ -20,10 +20,10 @@ import six
from twisted.internet import defer
from twisted.internet.defer import DeferredList

from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership, RoomVersions
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events import event_type_from_format_version
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.types import get_domain_from_id
@@ -43,8 +43,8 @@ class FederationBase(object):
        self._clock = hs.get_clock()

    @defer.inlineCallbacks
    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
                                       include_none=False):
    def _check_sigs_and_hash_and_fetch(self, origin, pdus, room_version,
                                       outlier=False, include_none=False):
        """Takes a list of PDUs and checks the signatures and hashes of each
        one. If a PDU fails its signature check then we check if we have it in
        the database and if not then request it from the originating server of
@@ -56,13 +56,17 @@ class FederationBase(object):
        a new list.

        Args:
            origin (str)
            pdu (list)
            outlier (bool)
            room_version (str)
            outlier (bool): Whether the events are outliers or not
            include_none (bool): Whether to include None in the returned list
                for events that have failed their checks

        Returns:
            Deferred : A list of PDUs that have valid signatures and hashes.
        """
        deferreds = self._check_sigs_and_hashes(pdus)
        deferreds = self._check_sigs_and_hashes(room_version, pdus)

        @defer.inlineCallbacks
        def handle_check_result(pdu, deferred):
@@ -84,6 +88,7 @@ class FederationBase(object):
                res = yield self.get_pdu(
                    destinations=[pdu.origin],
                    event_id=pdu.event_id,
                    room_version=room_version,
                    outlier=outlier,
                    timeout=10000,
                )
@@ -116,16 +121,17 @@ class FederationBase(object):
        else:
            defer.returnValue([p for p in valid_pdus if p])

    def _check_sigs_and_hash(self, pdu):
    def _check_sigs_and_hash(self, room_version, pdu):
        return logcontext.make_deferred_yieldable(
            self._check_sigs_and_hashes([pdu])[0],
            self._check_sigs_and_hashes(room_version, [pdu])[0],
        )

    def _check_sigs_and_hashes(self, pdus):
    def _check_sigs_and_hashes(self, room_version, pdus):
        """Checks that each of the received events is correctly signed by the
        sending server.

        Args:
            room_version (str): The room version of the PDUs
            pdus (list[FrozenEvent]): the events to be checked

        Returns:
@@ -136,7 +142,7 @@ class FederationBase(object):
              * throws a SynapseError if the signature check failed.
            The deferreds run their callbacks in the sentinel logcontext.
        """
        deferreds = _check_sigs_on_pdus(self.keyring, pdus)
        deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)

        ctx = logcontext.LoggingContext.current_context()

@@ -198,16 +204,17 @@ class FederationBase(object):


class PduToCheckSig(namedtuple("PduToCheckSig", [
    "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
    "pdu", "redacted_pdu_json", "sender_domain", "deferreds",
])):
    pass


def _check_sigs_on_pdus(keyring, pdus):
def _check_sigs_on_pdus(keyring, room_version, pdus):
    """Check that the given events are correctly signed

    Args:
        keyring (synapse.crypto.Keyring): keyring object to do the checks
        room_version (str): the room version of the PDUs
        pdus (Collection[EventBase]): the events to be checked

    Returns:
@@ -220,9 +227,7 @@ def _check_sigs_on_pdus(keyring, pdus):

    # we want to check that the event is signed by:
    #
    #   (a) the server which created the event_id
    #
    #   (b) the sender's server.
    #   (a) the sender's server
    #
    #     - except in the case of invites created from a 3pid invite, which are exempt
    #       from this check, because the sender has to match that of the original 3pid
@@ -236,34 +241,26 @@ def _check_sigs_on_pdus(keyring, pdus):
    #       and signatures are *supposed* to be valid whether or not an event has been
    #       redacted. But this isn't the worst of the ways that 3pid invites are broken.
    #
    #   (b) for V1 and V2 rooms, the server which created the event_id
    #
    # let's start by getting the domain for each pdu, and flattening the event back
    # to JSON.

    pdus_to_check = [
        PduToCheckSig(
            pdu=p,
            redacted_pdu_json=prune_event(p).get_pdu_json(),
            event_id_domain=get_domain_from_id(p.event_id),
            sender_domain=get_domain_from_id(p.sender),
            deferreds=[],
        )
        for p in pdus
    ]

    # first make sure that the event is signed by the event_id's domain
    deferreds = keyring.verify_json_objects_for_server([
        (p.event_id_domain, p.redacted_pdu_json)
        for p in pdus_to_check
    ])

    for p, d in zip(pdus_to_check, deferreds):
        p.deferreds.append(d)

    # now let's look for events where the sender's domain is different to the
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks.
    # First we check that the sender event is signed by the sender's domain
    # (except if its a 3pid invite, in which case it may be sent by any server)
    pdus_to_check_sender = [
        p for p in pdus_to_check
        if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
        if not _is_invite_via_3pid(p.pdu)
    ]

    more_deferreds = keyring.verify_json_objects_for_server([
@@ -274,19 +271,43 @@ def _check_sigs_on_pdus(keyring, pdus):
    for p, d in zip(pdus_to_check_sender, more_deferreds):
        p.deferreds.append(d)

    # now let's look for events where the sender's domain is different to the
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks. Only do this if the room version has a concept of event ID domain
    if room_version in (
        RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
    ):
        pdus_to_check_event_id = [
            p for p in pdus_to_check
            if p.sender_domain != get_domain_from_id(p.pdu.event_id)
        ]

        more_deferreds = keyring.verify_json_objects_for_server([
            (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
            for p in pdus_to_check_event_id
        ])

        for p, d in zip(pdus_to_check_event_id, more_deferreds):
            p.deferreds.append(d)
    elif room_version in (RoomVersions.V3,):
        pass  # No further checks needed, as event IDs are hashes here
    else:
        raise RuntimeError("Unrecognized room version %s" % (room_version,))

    # replace lists of deferreds with single Deferreds
    return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]


def _flatten_deferred_list(deferreds):
    """Given a list of one or more deferreds, either return the single deferred, or
    combine into a DeferredList.
    """Given a list of deferreds, either return the single deferred,
    combine into a DeferredList, or return an already resolved deferred.
    """
    if len(deferreds) > 1:
        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
    else:
        assert len(deferreds) == 1
    elif len(deferreds) == 1:
        return deferreds[0]
    else:
        return defer.succeed(None)
|
||||
|
||||
|
||||
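(An illustrative aside, not part of the patch: the three arms of the new _flatten_deferred_list, exercised standalone. Only Twisted is assumed; the function body mirrors the hunk above.)

from __future__ import print_function

from twisted.internet import defer
from twisted.internet.defer import DeferredList

def flatten_deferred_list(deferreds):
    # >1: gather into a DeferredList that fails fast on the first error;
    # ==1: hand the deferred back untouched; 0: an already-fired deferred,
    # so callers can attach callbacks without special-casing empty lists.
    if len(deferreds) > 1:
        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
    elif len(deferreds) == 1:
        return deferreds[0]
    else:
        return defer.succeed(None)

d = flatten_deferred_list([defer.succeed(1), defer.succeed(2)])
d.addCallback(print)  # prints [(True, 1), (True, 2)]
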
 def _is_invite_via_3pid(event):
@@ -297,11 +318,12 @@ def _is_invite_via_3pid(event):
     )


-def event_from_pdu_json(pdu_json, outlier=False):
+def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
     """Construct a FrozenEvent from an event json received over federation

     Args:
         pdu_json (object): pdu as received over federation
+        event_format_version (int): The event format version
         outlier (bool): True to mark this event as an outlier

     Returns:
@@ -313,7 +335,7 @@ def event_from_pdu_json(pdu_json, outlier=False):
     """
     # we could probably enforce a bunch of other fields here (room_id, sender,
     # origin, etc etc)
-    assert_params_in_dict(pdu_json, ('event_id', 'type', 'depth'))
+    assert_params_in_dict(pdu_json, ('type', 'depth'))

     depth = pdu_json['depth']
     if not isinstance(depth, six.integer_types):
@@ -325,8 +347,8 @@ def event_from_pdu_json(pdu_json, outlier=False):
     elif depth > MAX_DEPTH:
         raise SynapseError(400, "Depth too large", Codes.BAD_JSON)

-    event = FrozenEvent(
-        pdu_json
+    event = event_type_from_format_version(event_format_version)(
+        pdu_json,
     )

     event.internal_metadata.outlier = outlier

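(Aside: a sketch of the dispatch that event_type_from_format_version implies — a map from wire-format version to event class. The classes and constants here are illustrative stand-ins, not the real synapse.events API.)

EVENT_FORMAT_V1 = 1  # rooms v1/v2: event_id is carried in the JSON
EVENT_FORMAT_V2 = 2  # rooms v3: event_id is derived by hashing the event

class EventV1(object):
    def __init__(self, event_dict):
        self.event_dict = event_dict

class EventV2(object):
    def __init__(self, event_dict):
        self.event_dict = event_dict

_FORMAT_TO_CLASS = {EVENT_FORMAT_V1: EventV1, EVENT_FORMAT_V2: EventV2}

def event_type_from_format_version(format_version):
    # Pick the event class for a wire format, failing loudly on versions
    # this server cannot parse.
    try:
        return _FORMAT_TO_CLASS[format_version]
    except KeyError:
        raise Exception("No event format %r" % (format_version,))

assert event_type_from_format_version(EVENT_FORMAT_V1) is EventV1
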
@@ -25,14 +25,19 @@ from prometheus_client import Counter

 from twisted.internet import defer

-from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
+from synapse.api.constants import (
+    KNOWN_ROOM_VERSIONS,
+    EventTypes,
+    Membership,
+    RoomVersions,
+)
 from synapse.api.errors import (
     CodeMessageException,
     FederationDeniedError,
     HttpResponseException,
     SynapseError,
 )
-from synapse.events import builder
+from synapse.events import builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -66,6 +71,9 @@ class FederationClient(FederationBase):
         self.state = hs.get_state_handler()
         self.transport_layer = hs.get_federation_transport_client()

+        self.hostname = hs.hostname
+        self.signing_key = hs.config.signing_key[0]
+
         self._get_pdu_cache = ExpiringCache(
             cache_name="get_pdu_cache",
             clock=self._clock,
@@ -162,13 +170,13 @@ class FederationClient(FederationBase):

     @defer.inlineCallbacks
     @log_function
-    def backfill(self, dest, context, limit, extremities):
+    def backfill(self, dest, room_id, limit, extremities):
         """Requests some more historic PDUs for the given context from the
         given destination server.

         Args:
             dest (str): The remote home server to ask.
-            context (str): The context to backfill.
+            room_id (str): The room_id to backfill.
             limit (int): The maximum number of PDUs to return.
             extremities (list): List of PDU id and origins of the first pdus
                 we have seen from the context
@@ -183,18 +191,21 @@ class FederationClient(FederationBase):
             return

         transaction_data = yield self.transport_layer.backfill(
-            dest, context, extremities, limit)
+            dest, room_id, extremities, limit)

         logger.debug("backfill transaction_data=%s", repr(transaction_data))

+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         pdus = [
-            event_from_pdu_json(p, outlier=False)
+            event_from_pdu_json(p, format_ver, outlier=False)
             for p in transaction_data["pdus"]
         ]

         # FIXME: We should handle signature failures more gracefully.
         pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            self._check_sigs_and_hashes(pdus),
+            self._check_sigs_and_hashes(room_version, pdus),
             consumeErrors=True,
         ).addErrback(unwrapFirstError))

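(Aside: the lookup-then-parse pattern this hunk introduces — fetch the room's version from the store, map it to a wire format, then parse every PDU with that format. A hedged sketch of the mapping; the constants are stand-ins, not synapse's real values.)

ROOM_V1, ROOM_V2, ROOM_V3 = "1", "2", "3"
FORMAT_V1, FORMAT_V2 = 1, 2

def room_version_to_event_format(room_version):
    # Rooms v1 and v2 share the original event format; v3 moved to
    # hash-derived event IDs and therefore a new wire format.
    if room_version in (ROOM_V1, ROOM_V2):
        return FORMAT_V1
    elif room_version == ROOM_V3:
        return FORMAT_V2
    raise Exception("Unknown room version %s" % (room_version,))

assert room_version_to_event_format("2") == FORMAT_V1
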
@@ -202,7 +213,8 @@ class FederationClient(FederationBase):

     @defer.inlineCallbacks
     @log_function
-    def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
+    def get_pdu(self, destinations, event_id, room_version, outlier=False,
+                timeout=None):
         """Requests the PDU with given origin and ID from the remote home
         servers.

@@ -212,6 +224,7 @@ class FederationClient(FederationBase):
         Args:
             destinations (list): Which home servers to query
             event_id (str): event to fetch
+            room_version (str): version of the room
             outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
                 it's from an arbitrary point in the context as opposed to part
                 of the current block of PDUs. Defaults to `False`
@@ -230,6 +243,8 @@ class FederationClient(FederationBase):

         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

+        format_ver = room_version_to_event_format(room_version)
+
         signed_pdu = None
         for destination in destinations:
             now = self._clock.time_msec()
@@ -245,7 +260,7 @@ class FederationClient(FederationBase):
                 logger.debug("transaction_data %r", transaction_data)

                 pdu_list = [
-                    event_from_pdu_json(p, outlier=outlier)
+                    event_from_pdu_json(p, format_ver, outlier=outlier)
                     for p in transaction_data["pdus"]
                 ]

@@ -253,7 +268,7 @@ class FederationClient(FederationBase):
                     pdu = pdu_list[0]

                     # Check signatures are correct.
-                    signed_pdu = yield self._check_sigs_and_hash(pdu)
+                    signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)

                     break

@@ -339,12 +354,16 @@ class FederationClient(FederationBase):
             destination, room_id, event_id=event_id,
         )

+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         pdus = [
-            event_from_pdu_json(p, outlier=True) for p in result["pdus"]
+            event_from_pdu_json(p, format_ver, outlier=True)
+            for p in result["pdus"]
         ]

         auth_chain = [
-            event_from_pdu_json(p, outlier=True)
+            event_from_pdu_json(p, format_ver, outlier=True)
             for p in result.get("auth_chain", [])
         ]

@@ -355,7 +374,8 @@ class FederationClient(FederationBase):
         signed_pdus = yield self._check_sigs_and_hash_and_fetch(
             destination,
             [p for p in pdus if p.event_id not in seen_events],
-            outlier=True
+            outlier=True,
+            room_version=room_version,
         )
         signed_pdus.extend(
             seen_events[p.event_id] for p in pdus if p.event_id in seen_events
@@ -364,7 +384,8 @@ class FederationClient(FederationBase):
         signed_auth = yield self._check_sigs_and_hash_and_fetch(
             destination,
             [p for p in auth_chain if p.event_id not in seen_events],
-            outlier=True
+            outlier=True,
+            room_version=room_version,
         )
         signed_auth.extend(
             seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
@@ -411,6 +432,8 @@ class FederationClient(FederationBase):
             random.shuffle(srvs)
             return srvs

+        room_version = yield self.store.get_room_version(room_id)
+
         batch_size = 20
         missing_events = list(missing_events)
         for i in range(0, len(missing_events), batch_size):
@@ -421,6 +444,7 @@ class FederationClient(FederationBase):
                     self.get_pdu,
                     destinations=random_server_list(),
                     event_id=e_id,
+                    room_version=room_version,
                 )
                 for e_id in batch
             ]
@@ -445,13 +469,17 @@ class FederationClient(FederationBase):
             destination, room_id, event_id,
         )

+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         auth_chain = [
-            event_from_pdu_json(p, outlier=True)
+            event_from_pdu_json(p, format_ver, outlier=True)
             for p in res["auth_chain"]
         ]

         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination, auth_chain,
+            outlier=True, room_version=room_version,
         )

         signed_auth.sort(key=lambda e: e.depth)
@@ -522,6 +550,8 @@ class FederationClient(FederationBase):
         Does so by asking one of the already participating servers to create an
         event with proper context.

+        Returns a fully signed and hashed event.
+
         Note that this does not append any events to any graphs.

         Args:
@@ -536,8 +566,10 @@ class FederationClient(FederationBase):
             params (dict[str, str|Iterable[str]]): Query parameters to include in the
                 request.
         Return:
-            Deferred: resolves to a tuple of (origin (str), event (object))
-            where origin is the remote homeserver which generated the event.
+            Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
+            `(origin, event, event_format)` where origin is the remote
+            homeserver which generated the event, and event_format is one of
+            `synapse.api.constants.EventFormatVersions`.

         Fails with a ``SynapseError`` if the chosen remote server
         returns a 300/400 code.
@@ -557,6 +589,11 @@ class FederationClient(FederationBase):
                 destination, room_id, user_id, membership, params,
             )

+            # Note: If not supplied, the room version may be either v1 or v2,
+            # however either way the event format version will be v1.
+            room_version = ret.get("room_version", RoomVersions.V1)
+            event_format = room_version_to_event_format(room_version)
+
             pdu_dict = ret.get("event", None)
             if not isinstance(pdu_dict, dict):
                 raise InvalidResponseError("Bad 'event' field in response")
@@ -571,17 +608,20 @@ class FederationClient(FederationBase):
             if "prev_state" not in pdu_dict:
                 pdu_dict["prev_state"] = []

-            ev = builder.EventBuilder(pdu_dict)
+            ev = builder.create_local_event_from_event_dict(
+                self._clock, self.hostname, self.signing_key,
+                format_version=event_format, event_dict=pdu_dict,
+            )

             defer.returnValue(
-                (destination, ev)
+                (destination, ev, event_format)
             )

         return self._try_destination_list(
             "make_" + membership, destinations, send_request,
         )

-    def send_join(self, destinations, pdu):
+    def send_join(self, destinations, pdu, event_format_version):
         """Sends a join event to one of a list of homeservers.

         Doing so will cause the remote server to add the event to the graph,
@@ -591,6 +631,7 @@ class FederationClient(FederationBase):
             destinations (str): Candidate homeservers which are probably
                 participating in the room.
             pdu (BaseEvent): event to be sent
+            event_format_version (int): The event format version

         Return:
             Deferred: resolves to a dict with members ``origin`` (a string
@@ -636,12 +677,12 @@ class FederationClient(FederationBase):
             logger.debug("Got content: %s", content)

             state = [
-                event_from_pdu_json(p, outlier=True)
+                event_from_pdu_json(p, event_format_version, outlier=True)
                 for p in content.get("state", [])
             ]

             auth_chain = [
-                event_from_pdu_json(p, outlier=True)
+                event_from_pdu_json(p, event_format_version, outlier=True)
                 for p in content.get("auth_chain", [])
             ]

@@ -650,9 +691,21 @@ class FederationClient(FederationBase):
                 for p in itertools.chain(state, auth_chain)
             }

+            room_version = None
+            for e in state:
+                if (e.type, e.state_key) == (EventTypes.Create, ""):
+                    room_version = e.content.get("room_version", RoomVersions.V1)
+                    break
+
+            if room_version is None:
+                # If the state doesn't have a create event then the room is
+                # invalid, and it would fail auth checks anyway.
+                raise SynapseError(400, "No create event in state")
+
             valid_pdus = yield self._check_sigs_and_hash_and_fetch(
                 destination, list(pdus.values()),
                 outlier=True,
+                room_version=room_version,
             )

             valid_pdus_map = {
@@ -690,32 +743,75 @@ class FederationClient(FederationBase):

     @defer.inlineCallbacks
     def send_invite(self, destination, room_id, event_id, pdu):
-        time_now = self._clock.time_msec()
-        try:
-            code, content = yield self.transport_layer.send_invite(
-                destination=destination,
-                room_id=room_id,
-                event_id=event_id,
-                content=pdu.get_pdu_json(time_now),
-            )
-        except HttpResponseException as e:
-            if e.code == 403:
-                raise e.to_synapse_error()
-            raise
+        room_version = yield self.store.get_room_version(room_id)
+
+        content = yield self._do_send_invite(destination, pdu, room_version)

         pdu_dict = content["event"]

         logger.debug("Got response to send_invite: %s", pdu_dict)

-        pdu = event_from_pdu_json(pdu_dict)
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
+        pdu = event_from_pdu_json(pdu_dict, format_ver)

         # Check signatures are correct.
-        pdu = yield self._check_sigs_and_hash(pdu)
+        pdu = yield self._check_sigs_and_hash(room_version, pdu)

         # FIXME: We should handle signature failures more gracefully.

         defer.returnValue(pdu)

+    @defer.inlineCallbacks
+    def _do_send_invite(self, destination, pdu, room_version):
+        """Actually sends the invite, first trying v2 API and falling back to
+        v1 API if necessary.
+
+        Args:
+            destination (str): Target server
+            pdu (FrozenEvent)
+            room_version (str)
+
+        Returns:
+            dict: The event as a dict as returned by the remote server
+        """
+        time_now = self._clock.time_msec()
+
+        try:
+            content = yield self.transport_layer.send_invite_v2(
+                destination=destination,
+                room_id=pdu.room_id,
+                event_id=pdu.event_id,
+                content={
+                    "event": pdu.get_pdu_json(time_now),
+                    "room_version": room_version,
+                    "invite_room_state": pdu.unsigned.get("invite_room_state", []),
+                },
+            )
+            defer.returnValue(content)
+        except HttpResponseException as e:
+            if e.code in [400, 404]:
+                if room_version in (RoomVersions.V1, RoomVersions.V2):
+                    pass  # We'll fall through
+                else:
+                    raise Exception("Remote server is too old")
+            elif e.code == 403:
+                raise e.to_synapse_error()
+            else:
+                raise
+
+        # Didn't work, try v1 API.
+        # Note the v1 API returns a tuple of `(200, content)`
+
+        _, content = yield self.transport_layer.send_invite_v1(
+            destination=destination,
+            room_id=pdu.room_id,
+            event_id=pdu.event_id,
+            content=pdu.get_pdu_json(time_now),
+        )
+        defer.returnValue(content)

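(Aside: the negotiation pattern _do_send_invite implements, reduced to its skeleton. Try the newest endpoint first; a 400/404 from a server that has never heard of the route means "fall back to v1", but only when the room is old enough for the v1 API to carry it. All names below are placeholders.)

class HttpLikeError(Exception):
    # Minimal stand-in for synapse's HttpResponseException.
    def __init__(self, code):
        super(HttpLikeError, self).__init__(code)
        self.code = code

def call_with_fallback(send_v2, send_v1, room_version, old_versions=("1", "2")):
    try:
        return send_v2()
    except HttpLikeError as e:
        if e.code in (400, 404) and room_version in old_versions:
            return send_v1()  # remote predates the v2 invite route
        raise  # a real failure, or a room the v1 API cannot express

def failing_v2():
    raise HttpLikeError(404)

print(call_with_fallback(failing_v2, lambda: {"event": {}}, "1"))
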
     def send_leave(self, destinations, pdu):
         """Sends a leave event to one of a list of homeservers.

@@ -785,13 +881,16 @@ class FederationClient(FederationBase):
             content=send_content,
         )

+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         auth_chain = [
-            event_from_pdu_json(e)
+            event_from_pdu_json(e, format_ver)
             for e in content["auth_chain"]
         ]

         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination, auth_chain, outlier=True, room_version=room_version,
         )

         signed_auth.sort(key=lambda e: e.depth)
@@ -833,13 +932,16 @@ class FederationClient(FederationBase):
                 timeout=timeout,
             )

+            room_version = yield self.store.get_room_version(room_id)
+            format_ver = room_version_to_event_format(room_version)
+
             events = [
-                event_from_pdu_json(e)
+                event_from_pdu_json(e, format_ver)
                 for e in content.get("events", [])
             ]

             signed_events = yield self._check_sigs_and_hash_and_fetch(
-                destination, events, outlier=False
+                destination, events, outlier=False, room_version=room_version,
             )
         except HttpResponseException as e:
             if not e.code == 400:

@@ -25,7 +25,7 @@ from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure

-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import (
     AuthError,
     FederationError,
@@ -34,6 +34,7 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.crypto.event_signing import compute_event_signature
+from synapse.events import room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
@@ -178,14 +179,13 @@ class FederationServer(FederationBase):
                 continue

             try:
-                # In future we will actually use the room version to parse the
-                # PDU into an event.
-                yield self.store.get_room_version(room_id)
+                room_version = yield self.store.get_room_version(room_id)
+                format_ver = room_version_to_event_format(room_version)
             except NotFoundError:
                 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                 continue

-            event = event_from_pdu_json(p)
+            event = event_from_pdu_json(p, format_ver)
             pdus_by_room.setdefault(room_id, []).append(event)

         pdu_results = {}
@@ -322,7 +322,7 @@ class FederationServer(FederationBase):
         if self.hs.is_mine_id(event.event_id):
             event.signatures.update(
                 compute_event_signature(
-                    event,
+                    event.get_pdu_json(),
                     self.hs.hostname,
                     self.hs.config.signing_key[0]
                 )
@@ -369,18 +369,23 @@ class FederationServer(FederationBase):
         })

     @defer.inlineCallbacks
-    def on_invite_request(self, origin, content):
-        pdu = event_from_pdu_json(content)
+    def on_invite_request(self, origin, content, room_version):
+        format_ver = room_version_to_event_format(room_version)
+
+        pdu = event_from_pdu_json(content, format_ver)
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
         ret_pdu = yield self.handler.on_invite_request(origin, pdu)
         time_now = self._clock.time_msec()
-        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
+        defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)})

     @defer.inlineCallbacks
-    def on_send_join_request(self, origin, content):
+    def on_send_join_request(self, origin, content, room_id):
         logger.debug("on_send_join_request: content: %s", content)
-        pdu = event_from_pdu_json(content)
+
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+        pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
@@ -400,13 +405,22 @@ class FederationServer(FederationBase):
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, room_id)
         pdu = yield self.handler.on_make_leave_request(room_id, user_id)

+        room_version = yield self.store.get_room_version(room_id)
+
         time_now = self._clock.time_msec()
-        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+        defer.returnValue({
+            "event": pdu.get_pdu_json(time_now),
+            "room_version": room_version,
+        })

     @defer.inlineCallbacks
-    def on_send_leave_request(self, origin, content):
+    def on_send_leave_request(self, origin, content, room_id):
         logger.debug("on_send_leave_request: content: %s", content)
-        pdu = event_from_pdu_json(content)
+
+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+        pdu = event_from_pdu_json(content, format_ver)

         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, pdu.room_id)
@@ -452,13 +466,16 @@ class FederationServer(FederationBase):
         origin_host, _ = parse_server_name(origin)
         yield self.check_server_matches_acl(origin_host, room_id)

+        room_version = yield self.store.get_room_version(room_id)
+        format_ver = room_version_to_event_format(room_version)
+
         auth_chain = [
-            event_from_pdu_json(e)
+            event_from_pdu_json(e, format_ver)
             for e in content["auth_chain"]
         ]

         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            origin, auth_chain, outlier=True
+            origin, auth_chain, outlier=True, room_version=room_version,
         )

         ret = yield self.handler.on_query_auth(
@@ -603,16 +620,19 @@ class FederationServer(FederationBase):
         """
         # check that it's actually being sent from a valid destination to
        # work around bug #1753 in 0.18.5 and 0.18.6
-        if origin != get_domain_from_id(pdu.event_id):
+        if origin != get_domain_from_id(pdu.sender):
             # We continue to accept join events from any server; this is
             # necessary for the federation join dance to work correctly.
             # (When we join over federation, the "helper" server is
             # responsible for sending out the join event, rather than the
-            # origin. See bug #1893).
+            # origin. See bug #1893. This is also true for some third party
+            # invites).
             if not (
                 pdu.type == 'm.room.member' and
                 pdu.content and
-                pdu.content.get("membership", None) == 'join'
+                pdu.content.get("membership", None) in (
+                    Membership.JOIN, Membership.INVITE,
+                )
             ):
                 logger.info(
                     "Discarding PDU %s from invalid origin %s",
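(Aside: the relaxed origin gate above, as a standalone predicate. Joins — and, with this change, invites — are legitimately sent out by a "helper" server other than the sender's own, so only those memberships bypass the origin == sender-domain rule. The constants are stand-ins for synapse.api.constants.Membership.)

JOIN, INVITE = "join", "invite"

def origin_may_send(origin, sender_domain, event_type, membership):
    if origin == sender_domain:
        return True
    # The helper server in a federated join/invite dance sends the event
    # on the joining/invited user's behalf (see bug #1893).
    return event_type == "m.room.member" and membership in (JOIN, INVITE)

assert origin_may_send("a.org", "b.org", "m.room.member", "join")
assert not origin_may_send("a.org", "b.org", "m.room.message", None)
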
@@ -625,9 +645,12 @@ class FederationServer(FederationBase):
                 pdu.event_id, origin
             )

+        # We've already checked that we know the room version by this point
+        room_version = yield self.store.get_room_version(pdu.room_id)
+
         # Check signature.
         try:
-            pdu = yield self._check_sigs_and_hash(pdu)
+            pdu = yield self._check_sigs_and_hash(room_version, pdu)
         except SynapseError as e:
             raise FederationError(
                 "ERROR",

@@ -22,7 +22,11 @@ from prometheus_client import Counter
 from twisted.internet import defer

 import synapse.metrics
-from synapse.api.errors import FederationDeniedError, HttpResponseException
+from synapse.api.errors import (
+    FederationDeniedError,
+    HttpResponseException,
+    RequestSendFailed,
+)
 from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
 from synapse.metrics import (
     LaterGauge,
@@ -171,7 +175,7 @@ class TransactionQueue(object):
         def handle_event(event):
             # Only send events for this server.
             send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
-            is_mine = self.is_mine_id(event.event_id)
+            is_mine = self.is_mine_id(event.sender)
             if not is_mine and send_on_behalf_of is None:
                 return

@@ -518,11 +522,21 @@ class TransactionQueue(object):
             )
         except FederationDeniedError as e:
             logger.info(e)
-        except Exception as e:
-            logger.warn(
-                "TX [%s] Failed to send transaction: %s",
+        except HttpResponseException as e:
+            logger.warning(
+                "TX [%s] Received %d response to transaction: %s",
+                destination, e.code, e,
+            )
+        except RequestSendFailed as e:
+            logger.warning("TX [%s] Failed to send transaction: %s", destination, e)
+
+            for p, _ in pending_pdus:
+                logger.info("Failed to send event %s to %s", p.event_id,
+                            destination)
+        except Exception:
+            logger.exception(
+                "TX [%s] Failed to send transaction",
                 destination,
-                e,
             )
             for p, _ in pending_pdus:
                 logger.info("Failed to send event %s to %s", p.event_id,
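(Aside: the error ladder this hunk builds, most specific first. Expected transport failures are logged at WARNING without stack traces; only a genuinely unexpected error keeps the full traceback. The exception types here are stand-ins for synapse's.)

import logging

logger = logging.getLogger("tx")

class HttpResponseError(Exception):
    code = 502

class RequestSendError(Exception):
    pass

def send_transaction(destination, do_send):
    try:
        do_send()
    except HttpResponseError as e:
        # The remote answered with an error status: noisy but routine.
        logger.warning("TX [%s] Received %d response", destination, e.code)
    except RequestSendError as e:
        # Connection-level failure: also routine on federation.
        logger.warning("TX [%s] Failed to send: %s", destination, e)
    except Exception:
        # Anything else is a bug on our side; keep the traceback.
        logger.exception("TX [%s] Failed to send transaction", destination)

def flaky():
    raise RequestSendError("connection refused")

send_transaction("remote.example.org", flaky)  # logs a warning, no traceback
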
@@ -21,7 +21,7 @@ from six.moves import urllib

 from twisted.internet import defer

 from synapse.api.constants import Membership
-from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
 from synapse.util.logutils import log_function

 logger = logging.getLogger(__name__)

@@ -51,7 +51,7 @@ class TransportLayerClient(object):
         logger.debug("get_room_state dest=%s, room=%s",
                      destination, room_id)

-        path = _create_path(PREFIX, "/state/%s/", room_id)
+        path = _create_v1_path("/state/%s/", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
         )

@@ -73,7 +73,7 @@ class TransportLayerClient(object):
         logger.debug("get_room_state_ids dest=%s, room=%s",
                      destination, room_id)

-        path = _create_path(PREFIX, "/state_ids/%s/", room_id)
+        path = _create_v1_path("/state_ids/%s/", room_id)
         return self.client.get_json(
             destination, path=path, args={"event_id": event_id},
         )

@@ -95,7 +95,7 @@ class TransportLayerClient(object):
         logger.debug("get_pdu dest=%s, event_id=%s",
                      destination, event_id)

-        path = _create_path(PREFIX, "/event/%s/", event_id)
+        path = _create_v1_path("/event/%s/", event_id)
         return self.client.get_json(destination, path=path, timeout=timeout)

     @log_function
@@ -121,7 +121,7 @@ class TransportLayerClient(object):
             # TODO: raise?
             return

-        path = _create_path(PREFIX, "/backfill/%s/", room_id)
+        path = _create_v1_path("/backfill/%s/", room_id)

         args = {
             "v": event_tuples,
@@ -167,7 +167,7 @@ class TransportLayerClient(object):
         # generated by the json_data_callback.
         json_data = transaction.get_dict()

-        path = _create_path(PREFIX, "/send/%s/", transaction.transaction_id)
+        path = _create_v1_path("/send/%s/", transaction.transaction_id)

         response = yield self.client.put_json(
             transaction.destination,
@@ -184,7 +184,7 @@ class TransportLayerClient(object):
     @log_function
     def make_query(self, destination, query_type, args, retry_on_dns_fail,
                    ignore_backoff=False):
-        path = _create_path(PREFIX, "/query/%s", query_type)
+        path = _create_v1_path("/query/%s", query_type)

         content = yield self.client.get_json(
             destination=destination,
@@ -231,7 +231,7 @@ class TransportLayerClient(object):
                 "make_membership_event called with membership='%s', must be one of %s" %
                 (membership, ",".join(valid_memberships))
             )
-        path = _create_path(PREFIX, "/make_%s/%s/%s", membership, room_id, user_id)
+        path = _create_v1_path("/make_%s/%s/%s", membership, room_id, user_id)

         ignore_backoff = False
         retry_on_dns_fail = False
@@ -258,7 +258,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_join(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/send_join/%s/%s", room_id, event_id)
+        path = _create_v1_path("/send_join/%s/%s", room_id, event_id)

         response = yield self.client.put_json(
             destination=destination,
@@ -271,7 +271,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_leave(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/send_leave/%s/%s", room_id, event_id)
+        path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)

         response = yield self.client.put_json(
             destination=destination,
@@ -289,8 +289,22 @@ class TransportLayerClient(object):

     @defer.inlineCallbacks
     @log_function
-    def send_invite(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/invite/%s/%s", room_id, event_id)
+    def send_invite_v1(self, destination, room_id, event_id, content):
+        path = _create_v1_path("/invite/%s/%s", room_id, event_id)

         response = yield self.client.put_json(
             destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+        defer.returnValue(response)
+
+    @defer.inlineCallbacks
+    @log_function
+    def send_invite_v2(self, destination, room_id, event_id, content):
+        path = _create_v2_path("/invite/%s/%s", room_id, event_id)
+
+        response = yield self.client.put_json(
+            destination=destination,
@@ -306,7 +320,7 @@ class TransportLayerClient(object):
     def get_public_rooms(self, remote_server, limit, since_token,
                          search_filter=None, include_all_networks=False,
                          third_party_instance_id=None):
-        path = PREFIX + "/publicRooms"
+        path = _create_v1_path("/publicRooms")

         args = {
             "include_all_networks": "true" if include_all_networks else "false",
@@ -332,7 +346,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def exchange_third_party_invite(self, destination, room_id, event_dict):
-        path = _create_path(PREFIX, "/exchange_third_party_invite/%s", room_id,)
+        path = _create_v1_path("/exchange_third_party_invite/%s", room_id,)

         response = yield self.client.put_json(
             destination=destination,
@@ -345,7 +359,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def get_event_auth(self, destination, room_id, event_id):
-        path = _create_path(PREFIX, "/event_auth/%s/%s", room_id, event_id)
+        path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)

         content = yield self.client.get_json(
             destination=destination,
@@ -357,7 +371,7 @@ class TransportLayerClient(object):
     @defer.inlineCallbacks
     @log_function
     def send_query_auth(self, destination, room_id, event_id, content):
-        path = _create_path(PREFIX, "/query_auth/%s/%s", room_id, event_id)
+        path = _create_v1_path("/query_auth/%s/%s", room_id, event_id)

         content = yield self.client.post_json(
             destination=destination,
@@ -392,7 +406,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = PREFIX + "/user/keys/query"
+        path = _create_v1_path("/user/keys/query")

         content = yield self.client.post_json(
             destination=destination,
@@ -419,7 +433,7 @@ class TransportLayerClient(object):
         Returns:
             A dict containing the device keys.
         """
-        path = _create_path(PREFIX, "/user/devices/%s", user_id)
+        path = _create_v1_path("/user/devices/%s", user_id)

         content = yield self.client.get_json(
             destination=destination,
@@ -455,7 +469,7 @@ class TransportLayerClient(object):
             A dict containing the one-time keys.
         """

-        path = PREFIX + "/user/keys/claim"
+        path = _create_v1_path("/user/keys/claim")

         content = yield self.client.post_json(
             destination=destination,
@@ -469,7 +483,7 @@ class TransportLayerClient(object):
     @log_function
     def get_missing_events(self, destination, room_id, earliest_events,
                            latest_events, limit, min_depth, timeout):
-        path = _create_path(PREFIX, "/get_missing_events/%s", room_id,)
+        path = _create_v1_path("/get_missing_events/%s", room_id,)

         content = yield self.client.post_json(
             destination=destination,
@@ -489,7 +503,7 @@ class TransportLayerClient(object):
     def get_group_profile(self, destination, group_id, requester_user_id):
         """Get a group profile
         """
-        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
+        path = _create_v1_path("/groups/%s/profile", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -508,7 +522,7 @@ class TransportLayerClient(object):
             requester_user_id (str)
             content (dict): The new profile of the group
         """
-        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)
+        path = _create_v1_path("/groups/%s/profile", group_id,)

         return self.client.post_json(
             destination=destination,
@@ -522,7 +536,7 @@ class TransportLayerClient(object):
     def get_group_summary(self, destination, group_id, requester_user_id):
         """Get a group summary
         """
-        path = _create_path(PREFIX, "/groups/%s/summary", group_id,)
+        path = _create_v1_path("/groups/%s/summary", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -535,7 +549,7 @@ class TransportLayerClient(object):
     def get_rooms_in_group(self, destination, group_id, requester_user_id):
         """Get all rooms in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/rooms", group_id,)
+        path = _create_v1_path("/groups/%s/rooms", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -548,7 +562,7 @@ class TransportLayerClient(object):
                           content):
         """Add a room to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
+        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)

         return self.client.post_json(
             destination=destination,
@@ -562,8 +576,8 @@ class TransportLayerClient(object):
                              config_key, content):
         """Update room in group
         """
-        path = _create_path(
-            PREFIX, "/groups/%s/room/%s/config/%s",
+        path = _create_v1_path(
+            "/groups/%s/room/%s/config/%s",
             group_id, room_id, config_key,
         )

@@ -578,7 +592,7 @@ class TransportLayerClient(object):
     def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
         """Remove a room from a group
         """
-        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)
+        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id,)

         return self.client.delete_json(
             destination=destination,
@@ -591,7 +605,7 @@ class TransportLayerClient(object):
     def get_users_in_group(self, destination, group_id, requester_user_id):
         """Get users in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users", group_id,)
+        path = _create_v1_path("/groups/%s/users", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -604,7 +618,7 @@ class TransportLayerClient(object):
     def get_invited_users_in_group(self, destination, group_id, requester_user_id):
         """Get users that have been invited to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/invited_users", group_id,)
+        path = _create_v1_path("/groups/%s/invited_users", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -617,8 +631,8 @@ class TransportLayerClient(object):
     def accept_group_invite(self, destination, group_id, user_id, content):
         """Accept a group invite
         """
-        path = _create_path(
-            PREFIX, "/groups/%s/users/%s/accept_invite",
+        path = _create_v1_path(
+            "/groups/%s/users/%s/accept_invite",
             group_id, user_id,
         )

@@ -633,7 +647,7 @@ class TransportLayerClient(object):
     def join_group(self, destination, group_id, user_id, content):
         """Attempts to join a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/join", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)

         return self.client.post_json(
             destination=destination,
@@ -646,7 +660,7 @@ class TransportLayerClient(object):
     def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
         """Invite a user to a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/invite", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)

         return self.client.post_json(
             destination=destination,
@@ -662,7 +676,7 @@ class TransportLayerClient(object):
         invited.
         """

-        path = _create_path(PREFIX, "/groups/local/%s/users/%s/invite", group_id, user_id)
+        path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)

         return self.client.post_json(
             destination=destination,
@@ -676,7 +690,7 @@ class TransportLayerClient(object):
                                user_id, content):
         """Remove a user from a group
         """
-        path = _create_path(PREFIX, "/groups/%s/users/%s/remove", group_id, user_id)
+        path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)

         return self.client.post_json(
             destination=destination,
@@ -693,7 +707,7 @@ class TransportLayerClient(object):
         kicked from the group.
         """

-        path = _create_path(PREFIX, "/groups/local/%s/users/%s/remove", group_id, user_id)
+        path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)

         return self.client.post_json(
             destination=destination,
@@ -708,7 +722,7 @@ class TransportLayerClient(object):
         the attestations
         """

-        path = _create_path(PREFIX, "/groups/%s/renew_attestation/%s", group_id, user_id)
+        path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)

         return self.client.post_json(
             destination=destination,
@@ -723,12 +737,12 @@ class TransportLayerClient(object):
         """Update a room entry in a group summary
         """
         if category_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/categories/%s/rooms/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/categories/%s/rooms/%s",
                 group_id, category_id, room_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
+            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)

         return self.client.post_json(
             destination=destination,
@@ -744,12 +758,12 @@ class TransportLayerClient(object):
         """Delete a room entry in a group summary
         """
         if category_id:
-            path = _create_path(
-                PREFIX + "/groups/%s/summary/categories/%s/rooms/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/categories/%s/rooms/%s",
                 group_id, category_id, room_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)
+            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id,)

         return self.client.delete_json(
             destination=destination,
@@ -762,7 +776,7 @@ class TransportLayerClient(object):
     def get_group_categories(self, destination, group_id, requester_user_id):
         """Get all categories in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories", group_id,)
+        path = _create_v1_path("/groups/%s/categories", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -775,7 +789,7 @@ class TransportLayerClient(object):
     def get_group_category(self, destination, group_id, requester_user_id, category_id):
         """Get category info in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)

         return self.client.get_json(
             destination=destination,
@@ -789,7 +803,7 @@ class TransportLayerClient(object):
                               content):
         """Update a category in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)

         return self.client.post_json(
             destination=destination,
@@ -804,7 +818,7 @@ class TransportLayerClient(object):
                               category_id):
         """Delete a category in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)
+        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id,)

         return self.client.delete_json(
             destination=destination,
@@ -817,7 +831,7 @@ class TransportLayerClient(object):
     def get_group_roles(self, destination, group_id, requester_user_id):
         """Get all roles in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles", group_id,)
+        path = _create_v1_path("/groups/%s/roles", group_id,)

         return self.client.get_json(
             destination=destination,
@@ -830,7 +844,7 @@ class TransportLayerClient(object):
     def get_group_role(self, destination, group_id, requester_user_id, role_id):
         """Get a role's info
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)

         return self.client.get_json(
             destination=destination,
@@ -844,7 +858,7 @@ class TransportLayerClient(object):
                           content):
         """Update a role in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)

         return self.client.post_json(
             destination=destination,
@@ -858,7 +872,7 @@ class TransportLayerClient(object):
     def delete_group_role(self, destination, group_id, requester_user_id, role_id):
         """Delete a role in a group
         """
-        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)
+        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id,)

         return self.client.delete_json(
             destination=destination,
@@ -873,12 +887,12 @@ class TransportLayerClient(object):
         """Update a user's entry in a group
         """
         if role_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
             )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
+            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)

         return self.client.post_json(
             destination=destination,
@@ -893,7 +907,7 @@ class TransportLayerClient(object):
                               content):
         """Sets the join policy for a group
         """
-        path = _create_path(PREFIX, "/groups/%s/settings/m.join_policy", group_id,)
+        path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id,)

         return self.client.put_json(
             destination=destination,
@@ -909,12 +923,12 @@ class TransportLayerClient(object):
         """Delete a user's entry in a group
         """
         if role_id:
-            path = _create_path(
-                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
+            path = _create_v1_path(
+                "/groups/%s/summary/roles/%s/users/%s",
                 group_id, role_id, user_id,
            )
         else:
-            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)
+            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id,)

         return self.client.delete_json(
             destination=destination,
@@ -927,7 +941,7 @@ class TransportLayerClient(object):
         """Get the groups a list of users are publicising
         """

-        path = PREFIX + "/get_groups_publicised"
+        path = _create_v1_path("/get_groups_publicised")

         content = {"user_ids": user_ids}

@@ -939,20 +953,43 @@ class TransportLayerClient(object):
     )


-def _create_path(prefix, path, *args):
-    """Creates a path from the prefix, path template and args. Ensures that
-    all args are url encoded.
+def _create_v1_path(path, *args):
+    """Creates a path against V1 federation API from the path template and
+    args. Ensures that all args are url encoded.

     Example:

-        _create_path(PREFIX, "/event/%s/", event_id)
+        _create_v1_path("/event/%s/", event_id)

     Args:
-        prefix (str)
         path (str): String template for the path
         args: ([str]): Args to insert into path. Each arg will be url encoded

     Returns:
         str
     """
-    return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    return (
+        FEDERATION_V1_PREFIX
+        + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    )
+
+
+def _create_v2_path(path, *args):
+    """Creates a path against V2 federation API from the path template and
+    args. Ensures that all args are url encoded.
+
+    Example:
+
+        _create_v2_path("/event/%s/", event_id)
+
+    Args:
+        path (str): String template for the path
+        args: ([str]): Args to insert into path. Each arg will be url encoded
+
+    Returns:
+        str
+    """
+    return (
+        FEDERATION_V2_PREFIX
+        + path % tuple(urllib.parse.quote(arg, "") for arg in args)
+    )

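(Aside: what the new path helpers produce. Every argument is percent-encoded with an empty "safe" set, so ':', '$' and '/' inside Matrix identifiers cannot break the route. The prefix value below matches synapse.api.urls.)

from six.moves import urllib

FEDERATION_V1_PREFIX = "/_matrix/federation/v1"  # value from synapse.api.urls

def create_v1_path(path, *args):
    return FEDERATION_V1_PREFIX + path % tuple(
        urllib.parse.quote(arg, "") for arg in args
    )

print(create_v1_path("/event/%s/", "$abc:example.org"))
# -> /_matrix/federation/v1/event/%24abc%3Aexample.org/
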
@@ -21,8 +21,9 @@ import re
 from twisted.internet import defer

 import synapse
+from synapse.api.constants import RoomVersions
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
-from synapse.api.urls import FEDERATION_PREFIX as PREFIX
+from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
 from synapse.http.endpoint import parse_and_validate_server_name
 from synapse.http.server import JsonResource
 from synapse.http.servlet import (
@@ -227,6 +228,8 @@ class BaseFederationServlet(object):
     """
     REQUIRE_AUTH = True

+    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version
+
     def __init__(self, handler, authenticator, ratelimiter, server_name):
         self.handler = handler
         self.authenticator = authenticator
@@ -286,7 +289,7 @@ class BaseFederationServlet(object):
         return new_func

     def register(self, server):
-        pattern = re.compile("^" + PREFIX + self.PATH + "$")
+        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")

         for method in ("GET", "PUT", "POST"):
             code = getattr(self, "on_%s" % (method), None)
@@ -466,7 +469,7 @@ class FederationSendLeaveServlet(BaseFederationServlet):

     @defer.inlineCallbacks
     def on_PUT(self, origin, content, query, room_id, event_id):
-        content = yield self.handler.on_send_leave_request(origin, content)
+        content = yield self.handler.on_send_leave_request(origin, content, room_id)
         defer.returnValue((200, content))


@@ -484,18 +487,50 @@ class FederationSendJoinServlet(BaseFederationServlet):
     def on_PUT(self, origin, content, query, context, event_id):
         # TODO(paul): assert that context/event_id parsed from path actually
         # match those given in content
-        content = yield self.handler.on_send_join_request(origin, content)
+        content = yield self.handler.on_send_join_request(origin, content, context)
         defer.returnValue((200, content))


-class FederationInviteServlet(BaseFederationServlet):
+class FederationV1InviteServlet(BaseFederationServlet):
     PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

     @defer.inlineCallbacks
     def on_PUT(self, origin, content, query, context, event_id):
+        # We don't get a room version, so we have to assume it's EITHER v1 or
+        # v2. This is "fine" as the only difference between V1 and V2 is the
+        # state resolution algorithm, and we don't use that for processing
+        # invites
+        content = yield self.handler.on_invite_request(
+            origin, content, room_version=RoomVersions.V1,
+        )
+
+        # V1 federation API is defined to return a content of `[200, {...}]`
+        # due to a historical bug.
+        defer.returnValue((200, (200, content)))
+
+
+class FederationV2InviteServlet(BaseFederationServlet):
+    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
+
+    PREFIX = FEDERATION_V2_PREFIX
+
+    @defer.inlineCallbacks
+    def on_PUT(self, origin, content, query, context, event_id):
         # TODO(paul): assert that context/event_id parsed from path actually
         # match those given in content
-        content = yield self.handler.on_invite_request(origin, content)
+
+        room_version = content["room_version"]
+        event = content["event"]
+        invite_room_state = content["invite_room_state"]
+
+        # Synapse expects invite_room_state to be in unsigned, as it is in v1
+        # API
+
+        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
+
+        content = yield self.handler.on_invite_request(
+            origin, event, room_version=room_version,
+        )
         defer.returnValue((200, content))


@@ -1263,7 +1298,8 @@ FEDERATION_SERVLET_CLASSES = (
     FederationEventServlet,
     FederationSendJoinServlet,
     FederationSendLeaveServlet,
-    FederationInviteServlet,
+    FederationV1InviteServlet,
+    FederationV2InviteServlet,
     FederationQueryAuthServlet,
     FederationGetMissingEventsServlet,
     FederationEventAuthServlet,

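(Aside: how the class-level PREFIX hook steers one servlet onto the v2 tree while everything else stays on v1 — register() now compiles its regex from self.PREFIX, so a subclass opts in per-servlet. A simplified model:)

import re

FEDERATION_V1_PREFIX = "/_matrix/federation/v1"
FEDERATION_V2_PREFIX = "/_matrix/federation/v2"

class BaseServlet(object):
    PREFIX = FEDERATION_V1_PREFIX  # default: the v1 API
    PATH = ""

    def pattern(self):
        return re.compile("^" + self.PREFIX + self.PATH + "$")

class V2InviteServlet(BaseServlet):
    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
    PREFIX = FEDERATION_V2_PREFIX  # only this servlet moves to v2

print(V2InviteServlet().pattern().pattern)
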
150
synapse/handlers/acme.py
Normal file
150
synapse/handlers/acme.py
Normal file
@@ -0,0 +1,150 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2019 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import attr
from zope.interface import implementer

import twisted
import twisted.internet.error
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.python.url import URL
from twisted.web import server, static
from twisted.web.resource import Resource

from synapse.app import check_bind_error

logger = logging.getLogger(__name__)

try:
    from txacme.interfaces import ICertificateStore

    @attr.s
    @implementer(ICertificateStore)
    class ErsatzStore(object):
        """
        A store that only stores in memory.
        """

        certs = attr.ib(default=attr.Factory(dict))

        def store(self, server_name, pem_objects):
            self.certs[server_name] = [o.as_bytes() for o in pem_objects]
            return defer.succeed(None)


except ImportError:
    # txacme is missing
    pass


class AcmeHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.reactor = hs.get_reactor()

    @defer.inlineCallbacks
    def start_listening(self):

        # Configure logging for txacme, if you need to debug
        # from eliot import add_destinations
        # from eliot.twisted import TwistedDestination
        #
        # add_destinations(TwistedDestination())

        from txacme.challenges import HTTP01Responder
        from txacme.service import AcmeIssuingService
        from txacme.endpoint import load_or_create_client_key
        from txacme.client import Client
        from josepy.jwa import RS256

        self._store = ErsatzStore()
        responder = HTTP01Responder()

        self._issuer = AcmeIssuingService(
            cert_store=self._store,
            client_creator=(
                lambda: Client.from_url(
                    reactor=self.reactor,
                    url=URL.from_text(self.hs.config.acme_url),
                    key=load_or_create_client_key(
                        FilePath(self.hs.config.config_dir_path)
                    ),
                    alg=RS256,
                )
            ),
            clock=self.reactor,
            responders=[responder],
        )

        well_known = Resource()
        well_known.putChild(b'acme-challenge', responder.resource)
        responder_resource = Resource()
        responder_resource.putChild(b'.well-known', well_known)
        responder_resource.putChild(b'check', static.Data(b'OK', b'text/plain'))

        srv = server.Site(responder_resource)

        bind_addresses = self.hs.config.acme_bind_addresses
        for host in bind_addresses:
            logger.info(
                "Listening for ACME requests on %s:%i", host, self.hs.config.acme_port,
            )
            try:
                self.reactor.listenTCP(
                    self.hs.config.acme_port,
                    srv,
                    interface=host,
                )
            except twisted.internet.error.CannotListenError as e:
                check_bind_error(e, host, bind_addresses)

        # Make sure we are registered to the ACME server. There's no public API
        # for this, it is usually triggered by startService, but since we don't
        # want it to control where we save the certificates, we have to reach in
        # and trigger the registration machinery ourselves.
        self._issuer._registered = False
        yield self._issuer._ensure_registered()

    @defer.inlineCallbacks
    def provision_certificate(self):

        logger.warning("Reprovisioning %s", self.hs.hostname)

        try:
            yield self._issuer.issue_cert(self.hs.hostname)
        except Exception:
            logger.exception("Fail!")
            raise
        logger.warning("Reprovisioned %s, saving.", self.hs.hostname)
        cert_chain = self._store.certs[self.hs.hostname]

        try:
            with open(self.hs.config.tls_private_key_file, "wb") as private_key_file:
                for x in cert_chain:
                    if x.startswith(b"-----BEGIN RSA PRIVATE KEY-----"):
                        private_key_file.write(x)

            with open(self.hs.config.tls_certificate_file, "wb") as certificate_file:
                for x in cert_chain:
                    if x.startswith(b"-----BEGIN CERTIFICATE-----"):
                        certificate_file.write(x)
        except Exception:
            logger.exception("Failed saving!")
            raise

        defer.returnValue(True)
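
For orientation, a minimal sketch (not part of the diff) of how this handler could be driven at startup, assuming `hs` is a fully configured HomeServer:

    # Illustrative wiring only: bind the HTTP-01 responder, register with
    # the ACME server, then issue a certificate and persist it to disk.
    @defer.inlineCallbacks
    def reprovision_tls(hs):
        acme = AcmeHandler(hs)
        yield acme.start_listening()
        yield acme.provision_certificate()
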
@@ -563,10 +563,10 @@ class AuthHandler(BaseHandler):
         insensitively, but return None if there are multiple inexact matches.
 
         Args:
-            (str) user_id: complete @user:id
+            (unicode|bytes) user_id: complete @user:id
 
         Returns:
-            defer.Deferred: (str) canonical_user_id, or None if zero or
+            defer.Deferred: (unicode) canonical_user_id, or None if zero or
                 multiple matches
         """
         res = yield self._find_user_id_and_pwd_hash(user_id)
@@ -954,6 +954,15 @@ class MacaroonGenerator(object):
         return macaroon.serialize()
 
     def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
+        """
+
+        Args:
+            user_id (unicode):
+            duration_in_ms (int):
+
+        Returns:
+            unicode
+        """
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = login")
         now = self.hs.get_clock().time_msec()
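
The hunk is truncated here. For orientation, a token built this way is typically finished by attaching an expiry caveat and serializing; a sketch of that continuation (an assumption, not part of the diff):

    # Hypothetical continuation of the method body: cap the token's
    # lifetime so verifiers can reject it after `duration_in_ms`.
    macaroon.add_first_party_caveat("time < %d" % (now + duration_in_ms,))
    return macaroon.serialize()
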
@@ -532,6 +532,25 @@ class DeviceListEduUpdater(object):
 
                 stream_id = result["stream_id"]
                 devices = result["devices"]
+
+                # If the remote server has more than ~1000 devices for this user
+                # we assume that something is going horribly wrong (e.g. a bot
+                # that logs in and creates a new device every time it tries to
+                # send a message). Maintaining lots of devices per user in the
+                # cache can cause serious performance issues as if this request
+                # takes more than 60s to complete, internal replication from the
+                # inbound federation worker to the synapse master may time out
+                # causing the inbound federation to fail and causing the remote
+                # server to retry, causing a DoS. So in this scenario we give
+                # up on storing the total list of devices and only handle the
+                # delta instead.
+                if len(devices) > 1000:
+                    logger.warn(
+                        "Ignoring device list snapshot for %s as it has >1K devs (%d)",
+                        user_id, len(devices)
+                    )
+                    devices = []
+
                 yield self.store.update_remote_device_list_cache(
                     user_id, devices, stream_id,
                 )

@@ -57,8 +57,8 @@ class DirectoryHandler(BaseHandler):
         # general association creation for both human users and app services
 
         for wchar in string.whitespace:
-            if wchar in room_alias.localpart:
-                raise SynapseError(400, "Invalid characters in room alias")
+            if wchar in room_alias.localpart:
+                raise SynapseError(400, "Invalid characters in room alias")
 
         if not self.hs.is_mine(room_alias):
             raise SynapseError(400, "Room alias must be local")

@@ -34,6 +34,7 @@ from synapse.api.constants import (
     EventTypes,
     Membership,
     RejectedReason,
+    RoomVersions,
 )
 from synapse.api.errors import (
     AuthError,
@@ -43,10 +44,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
-from synapse.crypto.event_signing import (
-    add_hashes_and_signatures,
-    compute_event_signature,
-)
+from synapse.crypto.event_signing import compute_event_signature
 from synapse.events.validator import EventValidator
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
@@ -58,7 +56,6 @@ from synapse.types import UserID, get_domain_from_id
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
-from synapse.util.frozenutils import unfreeze
 from synapse.util.logutils import log_function
 from synapse.util.retryutils import NotRetryingDestination
 from synapse.visibility import filter_events_for_server
@@ -105,7 +102,7 @@ class FederationHandler(BaseHandler):
 
         self.hs = hs
 
-        self.store = hs.get_datastore()  # type: synapse.storage.DataStore
+        self.store = hs.get_datastore()
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -342,6 +339,8 @@ class FederationHandler(BaseHandler):
                     room_id, event_id, p,
                 )
 
+                room_version = yield self.store.get_room_version(room_id)
+
                 with logcontext.nested_logging_context(p):
                     # note that if any of the missing prevs share missing state or
                     # auth events, the requests to fetch those events are deduped
@@ -355,7 +354,7 @@ class FederationHandler(BaseHandler):
                     # we want the state *after* p; get_state_for_room returns the
                     # state *before* p.
                     remote_event = yield self.federation_client.get_pdu(
-                        [origin], p, outlier=True,
+                        [origin], p, room_version, outlier=True,
                     )
 
                     if remote_event is None:
@@ -379,7 +378,6 @@ class FederationHandler(BaseHandler):
                 for x in remote_state:
                     event_map[x.event_id] = x
 
-            room_version = yield self.store.get_room_version(room_id)
             state_map = yield resolve_events_with_store(
                 room_version, state_maps, event_map,
                 state_res_store=StateResolutionStore(self.store),
@@ -655,6 +653,8 @@ class FederationHandler(BaseHandler):
         if dest == self.server_name:
             raise SynapseError(400, "Can't backfill from self.")
 
+        room_version = yield self.store.get_room_version(room_id)
+
         events = yield self.federation_client.backfill(
             dest,
             room_id,
@@ -748,6 +748,7 @@ class FederationHandler(BaseHandler):
                 self.federation_client.get_pdu,
                 [dest],
                 event_id,
+                room_version=room_version,
                 outlier=True,
                 timeout=10000,
             )
@@ -1060,7 +1061,7 @@ class FederationHandler(BaseHandler):
         """
         logger.debug("Joining %s to %s", joinee, room_id)
 
-        origin, event = yield self._make_and_verify_event(
+        origin, event, event_format_version = yield self._make_and_verify_event(
            target_hosts,
            room_id,
            joinee,
@@ -1083,7 +1084,6 @@ class FederationHandler(BaseHandler):
         handled_events = set()
 
         try:
-            event = self._sign_event(event)
             # Try the host we successfully got a response to /make_join/
             # request first.
             try:
@@ -1091,7 +1091,9 @@ class FederationHandler(BaseHandler):
                 target_hosts.insert(0, origin)
             except ValueError:
                 pass
-            ret = yield self.federation_client.send_join(target_hosts, event)
+            ret = yield self.federation_client.send_join(
+                target_hosts, event, event_format_version,
+            )
 
             origin = ret["origin"]
             state = ret["state"]
@@ -1164,13 +1166,18 @@ class FederationHandler(BaseHandler):
         """
         event_content = {"membership": Membership.JOIN}
 
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Member,
-            "content": event_content,
-            "room_id": room_id,
-            "sender": user_id,
-            "state_key": user_id,
-        })
+        room_version = yield self.store.get_room_version(room_id)
+
+        builder = self.event_builder_factory.new(
+            room_version,
+            {
+                "type": EventTypes.Member,
+                "content": event_content,
+                "room_id": room_id,
+                "sender": user_id,
+                "state_key": user_id,
+            }
+        )
 
         try:
             event, context = yield self.event_creation_handler.create_new_client_event(
@@ -1182,7 +1189,9 @@ class FederationHandler(BaseHandler):
 
         # The remote hasn't signed it yet, obviously. We'll do the full checks
         # when we get the event back in `on_send_join_request`
-        yield self.auth.check_from_context(event, context, do_sig_check=False)
+        yield self.auth.check_from_context(
+            room_version, event, context, do_sig_check=False,
+        )
 
         defer.returnValue(event)
 
@@ -1287,11 +1296,11 @@ class FederationHandler(BaseHandler):
         )
 
         event.internal_metadata.outlier = True
-        event.internal_metadata.invite_from_remote = True
+        event.internal_metadata.out_of_band_membership = True
 
         event.signatures.update(
             compute_event_signature(
-                event,
+                event.get_pdu_json(),
                 self.hs.hostname,
                 self.hs.config.signing_key[0]
             )
@@ -1304,7 +1313,7 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
-        origin, event = yield self._make_and_verify_event(
+        origin, event, event_format_version = yield self._make_and_verify_event(
            target_hosts,
            room_id,
            user_id,
@@ -1313,7 +1322,7 @@ class FederationHandler(BaseHandler):
         # Mark as outlier as we don't have any state for this event; we're not
         # even in the room.
         event.internal_metadata.outlier = True
-        event = self._sign_event(event)
+        event.internal_metadata.out_of_band_membership = True
 
         # Try the host that we successfully called /make_leave/ on first for
         # the /send_leave/ request.
@@ -1336,7 +1345,7 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
                                content={}, params=None):
-        origin, pdu = yield self.federation_client.make_membership_event(
+        origin, event, format_ver = yield self.federation_client.make_membership_event(
            target_hosts,
            room_id,
            user_id,
@@ -1345,9 +1354,7 @@ class FederationHandler(BaseHandler):
            params=params,
         )
 
-        logger.debug("Got response to make_%s: %s", membership, pdu)
-
-        event = pdu
+        logger.debug("Got response to make_%s: %s", membership, event)
 
         # We should assert some things.
         # FIXME: Do this in a nicer way
@@ -1355,28 +1362,7 @@ class FederationHandler(BaseHandler):
         assert(event.user_id == user_id)
         assert(event.state_key == user_id)
         assert(event.room_id == room_id)
-        defer.returnValue((origin, event))
-
-    def _sign_event(self, event):
-        event.internal_metadata.outlier = False
-
-        builder = self.event_builder_factory.new(
-            unfreeze(event.get_pdu_json())
-        )
-
-        builder.event_id = self.event_builder_factory.create_event_id()
-        builder.origin = self.hs.hostname
-
-        if not hasattr(event, "signatures"):
-            builder.signatures = {}
-
-        add_hashes_and_signatures(
-            builder,
-            self.hs.hostname,
-            self.hs.config.signing_key[0],
-        )
-
-        return builder.build()
+        defer.returnValue((origin, event, format_ver))
 
     @defer.inlineCallbacks
     @log_function
@@ -1385,13 +1371,17 @@ class FederationHandler(BaseHandler):
         leave event for the room and return that. We do *not* persist or
         process it until the other server has signed it and sent it back.
         """
-        builder = self.event_builder_factory.new({
-            "type": EventTypes.Member,
-            "content": {"membership": Membership.LEAVE},
-            "room_id": room_id,
-            "sender": user_id,
-            "state_key": user_id,
-        })
+        room_version = yield self.store.get_room_version(room_id)
+        builder = self.event_builder_factory.new(
+            room_version,
+            {
+                "type": EventTypes.Member,
+                "content": {"membership": Membership.LEAVE},
+                "room_id": room_id,
+                "sender": user_id,
+                "state_key": user_id,
+            }
+        )
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
@@ -1400,7 +1390,9 @@ class FederationHandler(BaseHandler):
         try:
             # The remote hasn't signed it yet, obviously. We'll do the full checks
             # when we get the event back in `on_send_leave_request`
-            yield self.auth.check_from_context(event, context, do_sig_check=False)
+            yield self.auth.check_from_context(
+                room_version, event, context, do_sig_check=False,
+            )
         except AuthError as e:
             logger.warn("Failed to create new leave %r because %s", event, e)
             raise e
@@ -1659,6 +1651,13 @@ class FederationHandler(BaseHandler):
                 create_event = e
                 break
 
+        if create_event is None:
+            # If the state doesn't have a create event then the room is
+            # invalid, and it would fail auth checks anyway.
+            raise SynapseError(400, "No create event in state")
+
+        room_version = create_event.content.get("room_version", RoomVersions.V1)
+
         missing_auth_events = set()
         for e in itertools.chain(auth_events, state, [event]):
             for e_id in e.auth_event_ids():
@@ -1669,6 +1668,7 @@ class FederationHandler(BaseHandler):
             m_ev = yield self.federation_client.get_pdu(
                 [origin],
                 e_id,
+                room_version=room_version,
                 outlier=True,
                 timeout=10000,
             )
@@ -1687,7 +1687,7 @@ class FederationHandler(BaseHandler):
                 auth_for_e[(EventTypes.Create, "")] = create_event
 
             try:
-                self.auth.check(e, auth_events=auth_for_e)
+                self.auth.check(room_version, e, auth_events=auth_for_e)
             except SynapseError as err:
                 # we may get SynapseErrors here as well as AuthErrors. For
                 # instance, there are a couple of (ancient) events in some
@@ -1931,6 +1931,8 @@ class FederationHandler(BaseHandler):
         current_state = set(e.event_id for e in auth_events.values())
         different_auth = event_auth_events - current_state
 
+        room_version = yield self.store.get_room_version(event.room_id)
+
         if different_auth and not event.internal_metadata.is_outlier():
             # Do auth conflict res.
             logger.info("Different auth: %s", different_auth)
@@ -1955,8 +1957,6 @@ class FederationHandler(BaseHandler):
                 (d.type, d.state_key): d for d in different_events if d
             })
 
-            room_version = yield self.store.get_room_version(event.room_id)
-
             new_state = yield self.state_handler.resolve_events(
                 room_version,
                 [list(local_view.values()), list(remote_view.values())],
@@ -2056,7 +2056,7 @@ class FederationHandler(BaseHandler):
             )
 
         try:
-            self.auth.check(event, auth_events=auth_events)
+            self.auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
             logger.warn("Failed auth resolution for %r because %s", event, e)
             raise e
@@ -2279,18 +2279,26 @@ class FederationHandler(BaseHandler):
         }
 
         if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
-            builder = self.event_builder_factory.new(event_dict)
-            EventValidator().validate_new(builder)
+            room_version = yield self.store.get_room_version(room_id)
+            builder = self.event_builder_factory.new(room_version, event_dict)
+
+            EventValidator().validate_builder(builder)
             event, context = yield self.event_creation_handler.create_new_client_event(
                 builder=builder
             )
 
             event, context = yield self.add_display_name_to_third_party_invite(
-                event_dict, event, context
+                room_version, event_dict, event, context
             )
 
+            EventValidator().validate_new(event)
+
             # We need to tell the transaction queue to send this out, even
             # though the sender isn't a local user.
             event.internal_metadata.send_on_behalf_of = self.hs.hostname
 
             try:
-                yield self.auth.check_from_context(event, context)
+                yield self.auth.check_from_context(room_version, event, context)
             except AuthError as e:
                 logger.warn("Denying new third party invite %r because %s", event, e)
                 raise e
@@ -2317,23 +2325,31 @@ class FederationHandler(BaseHandler):
         Returns:
             Deferred: resolves (to None)
         """
-        builder = self.event_builder_factory.new(event_dict)
+        room_version = yield self.store.get_room_version(room_id)
+
+        # NB: event_dict has a particular specced format we might need to fudge
+        # if we change event formats too much.
+        builder = self.event_builder_factory.new(room_version, event_dict)
 
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
         )
 
         event, context = yield self.add_display_name_to_third_party_invite(
-            event_dict, event, context
+            room_version, event_dict, event, context
        )
 
         try:
-            self.auth.check_from_context(event, context)
+            self.auth.check_from_context(room_version, event, context)
         except AuthError as e:
             logger.warn("Denying third party invite %r because %s", event, e)
             raise e
         yield self._check_signature(event, context)
 
         # We need to tell the transaction queue to send this out, even
         # though the sender isn't a local user.
         event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
 
         # XXX we send the invite here, but send_membership_event also sends it,
         # so we end up making two requests. I think this is redundant.
         returned_invite = yield self.send_invite(origin, event)
@@ -2344,7 +2360,8 @@ class FederationHandler(BaseHandler):
         yield member_handler.send_membership_event(None, event, context)
 
     @defer.inlineCallbacks
-    def add_display_name_to_third_party_invite(self, event_dict, event, context):
+    def add_display_name_to_third_party_invite(self, room_version, event_dict,
+                                               event, context):
         key = (
             EventTypes.ThirdPartyInvite,
             event.content["third_party_invite"]["signed"]["token"]
@@ -2368,11 +2385,12 @@ class FederationHandler(BaseHandler):
             # auth checks. If we need the invite and don't have it then the
             # auth check code will explode appropriately.
 
-        builder = self.event_builder_factory.new(event_dict)
-        EventValidator().validate_new(builder)
+        builder = self.event_builder_factory.new(room_version, event_dict)
+        EventValidator().validate_builder(builder)
         event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
         )
+        EventValidator().validate_new(event)
         defer.returnValue((event, context))
 
     @defer.inlineCallbacks

@@ -167,18 +167,21 @@ class IdentityHandler(BaseHandler):
             "mxid": mxid,
             "threepid": threepid,
         }
-        headers = {}
 
         # we abuse the federation http client to sign the request, but we have to send it
         # using the normal http client since we don't want the SRV lookup and want normal
         # 'browser-like' HTTPS.
-        self.federation_http_client.sign_request(
+        auth_headers = self.federation_http_client.build_auth_headers(
             destination=None,
             method='POST',
             url_bytes='/_matrix/identity/api/v1/3pid/unbind'.encode('ascii'),
-            headers_dict=headers,
             content=content,
             destination_is=id_server,
         )
+        headers = {
+            b"Authorization": auth_headers,
+        }
 
         try:
             yield self.http_client.post_json_get_json(
                 url,

@@ -22,7 +22,7 @@ from canonicaljson import encode_canonical_json, json
 from twisted.internet import defer
 from twisted.internet.defer import succeed
 
-from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership, RoomVersions
 from synapse.api.errors import (
     AuthError,
     Codes,
@@ -31,7 +31,6 @@ from synapse.api.errors import (
     SynapseError,
 )
 from synapse.api.urls import ConsentURIBuilder
-from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
@@ -278,9 +277,17 @@ class EventCreationHandler(object):
         """
         yield self.auth.check_auth_blocking(requester.user.to_string())
 
-        builder = self.event_builder_factory.new(event_dict)
+        if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "":
+            room_version = event_dict["content"]["room_version"]
+        else:
+            try:
+                room_version = yield self.store.get_room_version(event_dict["room_id"])
+            except NotFoundError:
+                raise AuthError(403, "Unknown room")
 
-        self.validator.validate_new(builder)
+        builder = self.event_builder_factory.new(room_version, event_dict)
+
+        self.validator.validate_builder(builder)
 
         if builder.type == EventTypes.Member:
             membership = builder.content.get("membership", None)
@@ -318,6 +325,8 @@ class EventCreationHandler(object):
             prev_events_and_hashes=prev_events_and_hashes,
         )
 
+        self.validator.validate_new(event)
+
         defer.returnValue((event, context))
 
     def _is_exempt_from_privacy_policy(self, builder, requester):
@@ -535,40 +544,19 @@ class EventCreationHandler(object):
         prev_events_and_hashes = \
             yield self.store.get_prev_events_for_room(builder.room_id)
 
-        if prev_events_and_hashes:
-            depth = max([d for _, _, d in prev_events_and_hashes]) + 1
-            # we cap depth of generated events, to ensure that they are not
-            # rejected by other servers (and so that they can be persisted in
-            # the db)
-            depth = min(depth, MAX_DEPTH)
-        else:
-            depth = 1
-
         prev_events = [
             (event_id, prev_hashes)
             for event_id, prev_hashes, _ in prev_events_and_hashes
         ]
 
-        builder.prev_events = prev_events
-        builder.depth = depth
-
-        context = yield self.state.compute_event_context(builder)
+        event = yield builder.build(
+            prev_event_ids=[p for p, _ in prev_events],
+        )
+        context = yield self.state.compute_event_context(event)
         if requester:
             context.app_service = requester.app_service
 
-        if builder.is_state():
-            builder.prev_state = yield self.store.add_event_hashes(
-                context.prev_state_events
-            )
-
-        yield self.auth.add_auth_events(builder, context)
-
-        signing_key = self.hs.config.signing_key[0]
-        add_hashes_and_signatures(
-            builder, self.server_name, signing_key
-        )
-
-        event = builder.build()
-        self.validator.validate_new(event)
-
         logger.debug(
             "Created event %s",
@@ -603,8 +591,13 @@ class EventCreationHandler(object):
             extra_users (list(UserID)): Any extra users to notify about event
         """
 
+        if event.is_state() and (event.type, event.state_key) == (EventTypes.Create, ""):
+            room_version = event.content.get("room_version", RoomVersions.V1)
+        else:
+            room_version = yield self.store.get_room_version(event.room_id)
+
         try:
-            yield self.auth.check_from_context(event, context)
+            yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
             logger.warn("Denying new event %r because %s", event, err)
             raise err
@@ -752,7 +745,8 @@ class EventCreationHandler(object):
         auth_events = {
             (e.type, e.state_key): e for e in auth_events.values()
         }
-        if self.auth.check_redaction(event, auth_events=auth_events):
+        room_version = yield self.store.get_room_version(event.room_id)
+        if self.auth.check_redaction(room_version, event, auth_events=auth_events):
             original_event = yield self.store.get_event(
                 event.redacts,
                 check_redacted=False,
@@ -766,6 +760,9 @@ class EventCreationHandler(object):
                     "You don't have permission to redact events"
                 )
 
+            # We've already checked.
+            event.internal_metadata.recheck_redaction = False
+
         if event.type == EventTypes.Create:
             prev_state_ids = yield context.get_prev_state_ids(self.store)
             if prev_state_ids:

@@ -235,6 +235,17 @@ class PaginationHandler(object):
                 "room_key", next_key
             )
 
+        if events:
+            if event_filter:
+                events = event_filter.filter(events)
+
+            events = yield filter_events_for_client(
+                self.store,
+                user_id,
+                events,
+                is_peeking=(member_event_id is None),
+            )
+
         if not events:
             defer.returnValue({
                 "chunk": [],
@@ -242,16 +253,6 @@ class PaginationHandler(object):
                 "end": next_token.to_string(),
             })
 
-        if event_filter:
-            events = event_filter.filter(events)
-
-        events = yield filter_events_for_client(
-            self.store,
-            user_id,
-            events,
-            is_peeking=(member_event_id is None),
-        )
-
         state = None
         if event_filter and event_filter.lazy_load_members():
             # TODO: remove redundant members

@@ -126,6 +126,8 @@ class RegistrationHandler(BaseHandler):
         make_guest=False,
         admin=False,
         threepid=None,
+        user_type=None,
+        default_display_name=None,
     ):
         """Registers a new client on the server.
 
@@ -140,6 +142,10 @@ class RegistrationHandler(BaseHandler):
               since it offers no means of associating a device_id with the
              access_token. Instead you should call auth_handler.issue_access_token
              after registration.
+            user_type (str|None): type of user. One of the values from
+              api.constants.UserTypes, or None for a normal user.
+            default_display_name (unicode|None): if set, the new user's displayname
+              will be set to this. Defaults to 'localpart'.
         Returns:
             A tuple of (user_id, access_token).
         Raises:
@@ -169,6 +175,13 @@ class RegistrationHandler(BaseHandler):
             user = UserID(localpart, self.hs.hostname)
             user_id = user.to_string()
 
+            if was_guest:
+                # If the user was a guest then they already have a profile
+                default_display_name = None
+
+            elif default_display_name is None:
+                default_display_name = localpart
+
             token = None
             if generate_token:
                 token = self.macaroon_gen.generate_access_token(user_id)
@@ -178,11 +191,9 @@ class RegistrationHandler(BaseHandler):
                 password_hash=password_hash,
                 was_guest=was_guest,
                 make_guest=make_guest,
-                create_profile_with_localpart=(
-                    # If the user was a guest then they already have a profile
-                    None if was_guest else user.localpart
-                ),
+                create_profile_with_displayname=default_display_name,
                 admin=admin,
+                user_type=user_type,
             )
 
             if self.hs.config.user_directory_search_all_users:
@@ -203,13 +214,15 @@ class RegistrationHandler(BaseHandler):
             yield self.check_user_id_not_appservice_exclusive(user_id)
             if generate_token:
                 token = self.macaroon_gen.generate_access_token(user_id)
+            if default_display_name is None:
+                default_display_name = localpart
             try:
                 yield self.store.register(
                     user_id=user_id,
                     token=token,
                     password_hash=password_hash,
                     make_guest=make_guest,
-                    create_profile_with_localpart=user.localpart,
+                    create_profile_with_displayname=default_display_name,
                 )
             except SynapseError:
                 # if user id is taken, just generate another
@@ -233,9 +246,16 @@ class RegistrationHandler(BaseHandler):
         # auto-join the user to any rooms we're supposed to dump them into
         fake_requester = create_requester(user_id)
 
-        # try to create the room if we're the first user on the server
+        # try to create the room if we're the first real user on the server. Note
+        # that an auto-generated support user is not a real user and will never be
+        # the user to create the room
         should_auto_create_rooms = False
-        if self.hs.config.autocreate_auto_join_rooms:
+        is_support = yield self.store.is_support_user(user_id)
+        # There is an edge case where the first user is the support user, then
+        # the room is never created, though this seems unlikely and
+        # recoverable from given the support user being involved in the first
+        # place.
+        if self.hs.config.autocreate_auto_join_rooms and not is_support:
             count = yield self.store.count_all_users()
             should_auto_create_rooms = count == 1
         for r in self.hs.config.auto_join_rooms:
@@ -300,7 +320,7 @@ class RegistrationHandler(BaseHandler):
             user_id=user_id,
             password_hash="",
             appservice_id=service_id,
-            create_profile_with_localpart=user.localpart,
+            create_profile_with_displayname=user.localpart,
         )
         defer.returnValue(user_id)
 
@@ -327,35 +347,6 @@ class RegistrationHandler(BaseHandler):
         else:
             logger.info("Valid captcha entered from %s", ip)
 
-    @defer.inlineCallbacks
-    def register_saml2(self, localpart):
-        """
-        Registers email_id as SAML2 Based Auth.
-        """
-        if types.contains_invalid_mxid_characters(localpart):
-            raise SynapseError(
-                400,
-                "User ID can only contain characters a-z, 0-9, or '=_-./'",
-            )
-        yield self.auth.check_auth_blocking()
-        user = UserID(localpart, self.hs.hostname)
-        user_id = user.to_string()
-
-        yield self.check_user_id_not_appservice_exclusive(user_id)
-        token = self.macaroon_gen.generate_access_token(user_id)
-        try:
-            yield self.store.register(
-                user_id=user_id,
-                token=token,
-                password_hash=None,
-                create_profile_with_localpart=user.localpart,
-            )
-        except Exception as e:
-            yield self.store.add_access_token_to_user(user_id, token)
-            # Ignore Registration errors
-            logger.exception(e)
-        defer.returnValue((user_id, token))
-
     @defer.inlineCallbacks
     def register_email(self, threepidCreds):
         """
@@ -507,7 +498,7 @@ class RegistrationHandler(BaseHandler):
                 user_id=user_id,
                 token=token,
                 password_hash=password_hash,
-                create_profile_with_localpart=user.localpart,
+                create_profile_with_displayname=user.localpart,
             )
         else:
             yield self._auth_handler.delete_access_tokens_for_user(user_id)

@@ -123,9 +123,12 @@ class RoomCreationHandler(BaseHandler):
                 token_id=requester.access_token_id,
             )
         )
-        yield self.auth.check_from_context(tombstone_event, tombstone_context)
+        old_room_version = yield self.store.get_room_version(old_room_id)
+        yield self.auth.check_from_context(
+            old_room_version, tombstone_event, tombstone_context,
+        )
 
-        yield self.clone_exiting_room(
+        yield self.clone_existing_room(
             requester,
             old_room_id=old_room_id,
             new_room_id=new_room_id,
@@ -230,7 +233,7 @@ class RoomCreationHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def clone_exiting_room(
+    def clone_existing_room(
             self, requester, old_room_id, new_room_id, new_room_version,
             tombstone_event_id,
     ):
@@ -262,6 +265,7 @@ class RoomCreationHandler(BaseHandler):
 
         initial_state = dict()
 
+        # Replicate relevant room events
         types_to_copy = (
             (EventTypes.JoinRules, ""),
             (EventTypes.Name, ""),
@@ -269,6 +273,7 @@ class RoomCreationHandler(BaseHandler):
             (EventTypes.RoomHistoryVisibility, ""),
             (EventTypes.GuestAccess, ""),
             (EventTypes.RoomAvatar, ""),
+            (EventTypes.Encryption, ""),
         )
 
         old_room_state_ids = yield self.store.get_filtered_current_state_ids(
@@ -433,7 +438,7 @@ class RoomCreationHandler(BaseHandler):
         """
         user_id = requester.user.to_string()
 
-        self.auth.check_auth_blocking(user_id)
+        yield self.auth.check_auth_blocking(user_id)
 
         if not self.spam_checker.user_may_create_room(user_id):
             raise SynapseError(403, "You are not permitted to create rooms")

@@ -73,8 +73,14 @@ class RoomListHandler(BaseHandler):
             # We explicitly don't bother caching searches or requests for
             # appservice specific lists.
             logger.info("Bypassing cache as search request.")
+
+            # XXX: Quick hack to stop room directory queries taking too long.
+            # Timeout request after 60s. Probably want a more fundamental
+            # solution at some point
+            timeout = self.clock.time() + 60
             return self._get_public_room_list(
-                limit, since_token, search_filter, network_tuple=network_tuple,
+                limit, since_token, search_filter,
+                network_tuple=network_tuple, timeout=timeout,
             )
 
         key = (limit, since_token, network_tuple)
@@ -87,7 +93,8 @@ class RoomListHandler(BaseHandler):
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
-                              network_tuple=EMPTY_THIRD_PARTY_ID,):
+                              network_tuple=EMPTY_THIRD_PARTY_ID,
+                              timeout=None,):
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -202,6 +209,9 @@ class RoomListHandler(BaseHandler):
 
         chunk = []
         for i in range(0, len(rooms_to_scan), step):
+            if timeout and self.clock.time() > timeout:
+                raise Exception("Timed out searching room directory")
+
             batch = rooms_to_scan[i:i + step]
             logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(

@@ -63,7 +63,7 @@ class RoomMemberHandler(object):
         self.directory_handler = hs.get_handlers().directory_handler
         self.registration_handler = hs.get_handlers().registration_handler
         self.profile_handler = hs.get_profile_handler()
-        self.event_creation_hander = hs.get_event_creation_handler()
+        self.event_creation_handler = hs.get_event_creation_handler()
 
         self.member_linearizer = Linearizer(name="member")
 
@@ -161,6 +161,8 @@ class RoomMemberHandler(object):
         ratelimit=True,
         content=None,
     ):
+        user_id = target.to_string()
+
         if content is None:
             content = {}
 
@@ -168,14 +170,14 @@ class RoomMemberHandler(object):
         if requester.is_guest:
             content["kind"] = "guest"
 
-        event, context = yield self.event_creation_hander.create_event(
+        event, context = yield self.event_creation_handler.create_event(
             requester,
             {
                 "type": EventTypes.Member,
                 "content": content,
                 "room_id": room_id,
                 "sender": requester.user.to_string(),
-                "state_key": target.to_string(),
+                "state_key": user_id,
 
                 # For backwards compatibility:
                 "membership": membership,
@@ -186,14 +188,14 @@ class RoomMemberHandler(object):
         )
 
         # Check if this event matches the previous membership event for the user.
-        duplicate = yield self.event_creation_hander.deduplicate_state_event(
+        duplicate = yield self.event_creation_handler.deduplicate_state_event(
             event, context,
         )
         if duplicate is not None:
             # Discard the new event since this membership change is a no-op.
             defer.returnValue(duplicate)
 
-        yield self.event_creation_hander.handle_new_client_event(
+        yield self.event_creation_handler.handle_new_client_event(
             requester,
             event,
             context,
@@ -204,12 +206,12 @@ class RoomMemberHandler(object):
         prev_state_ids = yield context.get_prev_state_ids(self.store)
 
         prev_member_event_id = prev_state_ids.get(
-            (EventTypes.Member, target.to_string()),
+            (EventTypes.Member, user_id),
             None
         )
 
         if event.membership == Membership.JOIN:
-            # Only fire user_joined_room if the user has acutally joined the
+            # Only fire user_joined_room if the user has actually joined the
             # room. Don't bother if the user is just changing their profile
             # info.
             newly_joined = True
@@ -218,6 +220,18 @@ class RoomMemberHandler(object):
                 newly_joined = prev_member_event.membership != Membership.JOIN
             if newly_joined:
                 yield self._user_joined_room(target, room_id)
+
+            # Copy over direct message status and room tags if this is a join
+            # on an upgraded room
+
+            # Check if this is an upgraded room
+            predecessor = yield self.store.get_room_predecessor(room_id)
+
+            if predecessor:
+                # It is an upgraded room. Copy over old tags
+                self.copy_room_tags_and_direct_to_room(
+                    predecessor["room_id"], room_id, user_id,
+                )
         elif event.membership == Membership.LEAVE:
             if prev_member_event_id:
                 prev_member_event = yield self.store.get_event(prev_member_event_id)
@@ -226,6 +240,55 @@ class RoomMemberHandler(object):
 
         defer.returnValue(event)
 
+    @defer.inlineCallbacks
+    def copy_room_tags_and_direct_to_room(
+        self,
+        old_room_id,
+        new_room_id,
+        user_id,
+    ):
+        """Copies the tags and direct room state from one room to another.
+
+        Args:
+            old_room_id (str)
+            new_room_id (str)
+            user_id (str)
+
+        Returns:
+            Deferred[None]
+        """
+        # Retrieve user account data for predecessor room
+        user_account_data, _ = yield self.store.get_account_data_for_user(
+            user_id,
+        )
+
+        # Copy direct message state if applicable
+        direct_rooms = user_account_data.get("m.direct", {})
+
+        # Check which key this room is under
+        if isinstance(direct_rooms, dict):
+            for key, room_id_list in direct_rooms.items():
+                if old_room_id in room_id_list and new_room_id not in room_id_list:
+                    # Add new room_id to this key
+                    direct_rooms[key].append(new_room_id)
+
+                    # Save back to user's m.direct account data
+                    yield self.store.add_account_data_for_user(
+                        user_id, "m.direct", direct_rooms,
+                    )
+                    break
+
+        # Copy room tags if applicable
+        room_tags = yield self.store.get_tags_for_room(
+            user_id, old_room_id,
+        )
+
+        # Copy each room tag to the new room
+        for tag, tag_content in room_tags.items():
+            yield self.store.add_tag_to_room(
+                user_id, new_room_id, tag, tag_content
+            )
+
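For context, the `m.direct` account data that copy_room_tags_and_direct_to_room walks maps each user ID to the list of room IDs treated as direct chats with that user. A sketch with illustrative values, not taken from the diff:

    # Before a join on an upgraded room:
    direct_rooms = {"@friend:example.org": ["!old:example.org"]}
    # After the copy, the new room ID is appended under the same key:
    # {"@friend:example.org": ["!old:example.org", "!new:example.org"]}
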
     @defer.inlineCallbacks
     def update_membership(
         self,
@@ -493,7 +556,7 @@ class RoomMemberHandler(object):
         else:
             requester = synapse.types.create_requester(target_user)
 
-        prev_event = yield self.event_creation_hander.deduplicate_state_event(
+        prev_event = yield self.event_creation_handler.deduplicate_state_event(
             event, context,
         )
         if prev_event is not None:
@@ -513,7 +576,7 @@ class RoomMemberHandler(object):
         if is_blocked:
             raise SynapseError(403, "This room has been blocked on this server")
 
-        yield self.event_creation_hander.handle_new_client_event(
+        yield self.event_creation_handler.handle_new_client_event(
             requester,
             event,
             context,
@@ -527,7 +590,7 @@ class RoomMemberHandler(object):
         )
 
         if event.membership == Membership.JOIN:
-            # Only fire user_joined_room if the user has acutally joined the
+            # Only fire user_joined_room if the user has actually joined the
             # room. Don't bother if the user is just changing their profile
             # info.
             newly_joined = True
@@ -755,7 +818,7 @@ class RoomMemberHandler(object):
             )
         )
 
-        yield self.event_creation_hander.create_and_send_nonmember_event(
+        yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
                 "type": EventTypes.ThirdPartyInvite,
@@ -877,7 +940,8 @@ class RoomMemberHandler(object):
         # first member event?
         create_event_id = current_state_ids.get(("m.room.create", ""))
         if len(current_state_ids) == 1 and create_event_id:
-            defer.returnValue(self.hs.is_mine_id(create_event_id))
+            # We can only get here if we're in the process of creating the room
+            defer.returnValue(True)
 
         for etype, state_key in current_state_ids:
             if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):

@@ -37,6 +37,41 @@ class SearchHandler(BaseHandler):
     def __init__(self, hs):
         super(SearchHandler, self).__init__(hs)
 
+    @defer.inlineCallbacks
+    def get_old_rooms_from_upgraded_room(self, room_id):
+        """Retrieves room IDs of old rooms in the history of an upgraded room.
+
+        We do so by checking the m.room.create event of the room for a
+        `predecessor` key. If it exists, we add the room ID to our return
+        list and then check that room for a m.room.create event and so on
+        until we can no longer find any more previous rooms.
+
+        The full list of all found rooms is then returned.
+
+        Args:
+            room_id (str): id of the room to search through.
+
+        Returns:
+            Deferred[iterable[unicode]]: predecessor room ids
+        """
+
+        historical_room_ids = []
+
+        while True:
+            predecessor = yield self.store.get_room_predecessor(room_id)
+
+            # If no predecessor, assume we've hit a dead end
+            if not predecessor:
+                break
+
+            # Add predecessor's room ID
+            historical_room_ids.append(predecessor["room_id"])
+
+            # Scan through the old room for further predecessors
+            room_id = predecessor["room_id"]
+
+        defer.returnValue(historical_room_ids)
+
     @defer.inlineCallbacks
     def search(self, user, content, batch=None):
         """Performs a full text search for a user.
@@ -137,6 +172,18 @@ class SearchHandler(BaseHandler):
         )
         room_ids = set(r.room_id for r in rooms)
 
+        # If doing a subset of all rooms search, check if any of the rooms
+        # are from an upgraded room, and search their contents as well
+        if search_filter.rooms:
+            historical_room_ids = []
+            for room_id in search_filter.rooms:
+                # Add any previous rooms to the search if they exist
+                ids = yield self.get_old_rooms_from_upgraded_room(room_id)
+                historical_room_ids += ids
+
+            # Prevent any historical events from being filtered
+            search_filter = search_filter.with_room_ids(historical_room_ids)
+
         room_ids = search_filter.filter_rooms(room_ids)
 
         if batch_group == "room_id":

@@ -1473,10 +1473,22 @@ class SyncHandler(object):
             if since_token and since_token.is_after(leave_token):
                 continue
 
+            # If this is an out of band message, like a remote invite
+            # rejection, we include it in the recents batch. Otherwise, we
+            # let _load_filtered_recents handle fetching the correct
+            # batches.
+            #
+            # This is all screaming out for a refactor, as the logic here is
+            # subtle and the moving parts numerous.
+            if leave_event.internal_metadata.is_out_of_band_membership():
+                batch_events = [leave_event]
+            else:
+                batch_events = None
+
             room_entries.append(RoomSyncResultBuilder(
                 room_id=room_id,
                 rtype="archived",
-                events=None,
+                events=batch_events,
                 newly_joined=room_id in newly_joined_rooms,
                 full_state=False,
                 since_token=since_token,
@@ -1668,13 +1680,17 @@ class SyncHandler(object):
                 "content": content,
             })
 
-        account_data = sync_config.filter_collection.filter_room_account_data(
+        account_data_events = sync_config.filter_collection.filter_room_account_data(
             account_data_events
         )
 
         ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral)
 
-        if not (always_include or batch or account_data or ephemeral or full_state):
+        if not (always_include
+                or batch
+                or account_data_events
+                or ephemeral
+                or full_state):
             return
 
         state = yield self.compute_state_delta(
@@ -1745,7 +1761,7 @@ class SyncHandler(object):
                 room_id=room_id,
                 timeline=batch,
                 state=state,
-                account_data=account_data,
+                account_data=account_data_events,
             )
             if room_sync or always_include:
                 sync_result_builder.archived.append(room_sync)

@@ -19,6 +19,7 @@ from six import iteritems
 
 from twisted.internet import defer
 
+import synapse.metrics
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.roommember import ProfileInfo
@@ -125,9 +126,12 @@ class UserDirectoryHandler(object):
         """
         # FIXME(#3714): We should probably do this in the same worker as all
         # the other changes.
-        yield self.store.update_profile_in_user_dir(
-            user_id, profile.display_name, profile.avatar_url, None,
-        )
+        is_support = yield self.store.is_support_user(user_id)
+        # Support users are for diagnostics and should not appear in the user directory.
+        if not is_support:
+            yield self.store.update_profile_in_user_dir(
+                user_id, profile.display_name, profile.avatar_url, None,
+            )
 
     @defer.inlineCallbacks
     def handle_user_deactivated(self, user_id):
@@ -160,6 +164,11 @@ class UserDirectoryHandler(object):
             yield self._handle_deltas(deltas)
 
             self.pos = deltas[-1]["stream_id"]
+
+            # Expose current event processing position to prometheus
+            synapse.metrics.event_processing_positions.labels(
+                "user_dir").set(self.pos)
+
             yield self.store.update_user_directory_stream_pos(self.pos)
 
     @defer.inlineCallbacks
@@ -329,14 +338,7 @@ class UserDirectoryHandler(object):
                     public_value=Membership.JOIN,
                 )
 
-                if change is None:
-                    # Handle any profile changes
-                    yield self._handle_profile_change(
-                        state_key, room_id, prev_event_id, event_id,
-                    )
-                    continue
-
-                if not change:
+                if change is False:
                     # Need to check if the server left the room entirely, if so
                     # we might need to remove all the users in that room
                     is_in_room = yield self.store.is_host_joined(
@@ -354,16 +356,25 @@ class UserDirectoryHandler(object):
                 else:
                     logger.debug("Server is still in room: %r", room_id)
 
-                if change:  # The user joined
-                    event = yield self.store.get_event(event_id, allow_none=True)
-                    profile = ProfileInfo(
-                        avatar_url=event.content.get("avatar_url"),
-                        display_name=event.content.get("displayname"),
-                    )
-                    is_support = yield self.store.is_support_user(state_key)
-                    if not is_support:
-                        yield self._handle_new_user(room_id, state_key, profile)
-                else:  # The user left
-                    yield self._handle_remove_user(room_id, state_key)
+                is_support = yield self.store.is_support_user(state_key)
+                if not is_support:
+                    if change is None:
+                        # Handle any profile changes
+                        yield self._handle_profile_change(
+                            state_key, room_id, prev_event_id, event_id,
+                        )
+                        continue
+
+                    if change:  # The user joined
+                        event = yield self.store.get_event(event_id, allow_none=True)
+                        profile = ProfileInfo(
+                            avatar_url=event.content.get("avatar_url"),
+                            display_name=event.content.get("displayname"),
+                        )
+
+                        yield self._handle_new_user(room_id, state_key, profile)
+                    else:  # The user left
+                        yield self._handle_remove_user(room_id, state_key)
             else:
                 logger.debug("Ignoring irrelevant type: %r", typ)

@@ -21,28 +21,25 @@ from six.moves import urllib
 
 import treq
 from canonicaljson import encode_canonical_json, json
+from netaddr import IPAddress
 from prometheus_client import Counter
+from zope.interface import implementer, provider
 
 from OpenSSL import SSL
 from OpenSSL.SSL import VERIFY_NONE
-from twisted.internet import defer, protocol, reactor, ssl
-from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
-from twisted.web._newclient import ResponseDone
-from twisted.web.client import (
-    Agent,
-    BrowserLikeRedirectAgent,
-    ContentDecoderAgent,
-    GzipDecoder,
-    HTTPConnectionPool,
-    PartialDownloadError,
-    readBody,
-)
+from twisted.internet import defer, protocol, ssl
+from twisted.internet.interfaces import (
+    IReactorPluggableNameResolver,
+    IResolutionReceiver,
+)
+from twisted.python.failure import Failure
+from twisted.web._newclient import ResponseDone
+from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
 
 from synapse.api.errors import Codes, HttpResponseException, SynapseError
 from synapse.http import cancelled_to_request_timed_out_error, redact_uri
-from synapse.http.endpoint import SpiderEndpoint
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.logcontext import make_deferred_yieldable
@@ -50,8 +47,125 @@ from synapse.util.logcontext import make_deferred_yieldable
 logger = logging.getLogger(__name__)
 
 outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])
-incoming_responses_counter = Counter("synapse_http_client_responses", "",
-                                     ["method", "code"])
+incoming_responses_counter = Counter(
+    "synapse_http_client_responses", "", ["method", "code"]
+)
+
+
+def check_against_blacklist(ip_address, ip_whitelist, ip_blacklist):
+    """
+    Args:
+        ip_address (netaddr.IPAddress)
+        ip_whitelist (netaddr.IPSet)
+        ip_blacklist (netaddr.IPSet)
+    """
+    if ip_address in ip_blacklist:
+        if ip_whitelist is None or ip_address not in ip_whitelist:
+            return True
+    return False
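
A quick illustration of the semantics above: the whitelist only overrides the blacklist, it never blocks anything on its own (values illustrative, not part of the diff):

    from netaddr import IPAddress, IPSet

    blacklist = IPSet(["10.0.0.0/8"])
    whitelist = IPSet(["10.1.2.3"])

    check_against_blacklist(IPAddress("10.9.9.9"), whitelist, blacklist)  # True: blocked
    check_against_blacklist(IPAddress("10.1.2.3"), whitelist, blacklist)  # False: whitelisted
    check_against_blacklist(IPAddress("8.8.8.8"), None, blacklist)        # False: not blacklisted
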
|
||||
|
||||
|
||||
class IPBlacklistingResolver(object):
|
||||
"""
|
||||
A proxy for reactor.nameResolver which only produces non-blacklisted IP
|
||||
addresses, preventing DNS rebinding attacks on URL preview.
|
||||
"""
|
||||
|
||||
def __init__(self, reactor, ip_whitelist, ip_blacklist):
|
||||
"""
|
||||
Args:
|
||||
reactor (twisted.internet.reactor)
|
||||
ip_whitelist (netaddr.IPSet)
|
||||
ip_blacklist (netaddr.IPSet)
|
||||
"""
|
||||
self._reactor = reactor
|
||||
self._ip_whitelist = ip_whitelist
|
||||
self._ip_blacklist = ip_blacklist
|
||||
|
||||
def resolveHostName(self, recv, hostname, portNumber=0):
|
||||
|
||||
r = recv()
|
||||
d = defer.Deferred()
|
||||
addresses = []
|
||||
|
||||
@provider(IResolutionReceiver)
|
||||
class EndpointReceiver(object):
|
||||
@staticmethod
|
||||
def resolutionBegan(resolutionInProgress):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def addressResolved(address):
|
||||
ip_address = IPAddress(address.host)
|
||||
|
||||
if check_against_blacklist(
|
||||
ip_address, self._ip_whitelist, self._ip_blacklist
|
||||
):
|
||||
logger.info(
|
||||
"Dropped %s from DNS resolution to %s" % (ip_address, hostname)
|
||||
)
|
||||
raise SynapseError(403, "IP address blocked by IP blacklist entry")
|
||||
|
||||
addresses.append(address)
|
||||
|
||||
@staticmethod
|
||||
def resolutionComplete():
|
||||
d.callback(addresses)
|
||||
|
||||
self._reactor.nameResolver.resolveHostName(
|
||||
EndpointReceiver, hostname, portNumber=portNumber
|
||||
)
|
||||
|
||||
def _callback(addrs):
|
||||
r.resolutionBegan(None)
|
||||
for i in addrs:
|
||||
r.addressResolved(i)
|
||||
r.resolutionComplete()
|
||||
|
||||
d.addCallback(_callback)
|
||||
|
||||
return r
|
||||
|
||||
|
||||
class BlacklistingAgentWrapper(Agent):
|
||||
"""
|
||||
An Agent wrapper which will prevent access to IP addresses being accessed
|
||||
directly (without an IP address lookup).
|
||||
"""
|
||||
|
||||
def __init__(self, agent, reactor, ip_whitelist=None, ip_blacklist=None):
|
||||
"""
|
||||
Args:
|
||||
agent (twisted.web.client.Agent): The Agent to wrap.
|
||||
reactor (twisted.internet.reactor)
|
||||
ip_whitelist (netaddr.IPSet)
|
||||
ip_blacklist (netaddr.IPSet)
|
||||
"""
|
||||
self._agent = agent
|
||||
self._ip_whitelist = ip_whitelist
|
||||
self._ip_blacklist = ip_blacklist
|
||||
|
||||
def request(self, method, uri, headers=None, bodyProducer=None):
|
||||
h = urllib.parse.urlparse(uri.decode('ascii'))
|
||||
|
||||
try:
|
||||
ip_address = IPAddress(h.hostname)
|
||||
|
||||
if check_against_blacklist(
|
||||
ip_address, self._ip_whitelist, self._ip_blacklist
|
||||
):
|
||||
logger.info(
|
||||
"Blocking access to %s because of blacklist" % (ip_address,)
|
||||
)
|
||||
e = SynapseError(403, "IP address blocked by IP blacklist entry")
|
||||
return defer.fail(Failure(e))
|
||||
except Exception:
|
||||
# Not an IP
|
||||
pass
|
||||
|
||||
return self._agent.request(
|
||||
method, uri, headers=headers, bodyProducer=bodyProducer
|
||||
)
|
||||
|
||||
|
||||
class SimpleHttpClient(object):
@@ -59,14 +173,54 @@ class SimpleHttpClient(object):
    A simple, no-frills HTTP client with methods that wrap up common ways of
    using HTTP in Matrix
    """
    def __init__(self, hs):

    def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None):
        """
        Args:
            hs (synapse.server.HomeServer)
            treq_args (dict): Extra keyword arguments to be given to treq.request.
            ip_blacklist (netaddr.IPSet): The blacklisted IP addresses, which we
                may not request.
            ip_whitelist (netaddr.IPSet): The whitelisted IP addresses, which we
                may request even if they would otherwise be caught by the blacklist.
        """
        self.hs = hs

        pool = HTTPConnectionPool(reactor)
        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist
        self._extra_treq_args = treq_args

        self.user_agent = hs.version_string
        self.clock = hs.get_clock()
        if hs.config.user_agent_suffix:
            self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix)

        self.user_agent = self.user_agent.encode('ascii')

        if self._ip_blacklist:
            real_reactor = hs.get_reactor()
            # If we have an IP blacklist, we need to use a DNS resolver which
            # filters out blacklisted IP addresses, to prevent DNS rebinding.
            nameResolver = IPBlacklistingResolver(
                real_reactor, self._ip_whitelist, self._ip_blacklist
            )

            @implementer(IReactorPluggableNameResolver)
            class Reactor(object):
                def __getattr__(_self, attr):
                    if attr == "nameResolver":
                        return nameResolver
                    else:
                        return getattr(real_reactor, attr)

            self.reactor = Reactor()
        else:
            self.reactor = hs.get_reactor()

        # the pusher makes lots of concurrent SSL connections to sygnal, and
        # tends to do so in batches, so we need to allow the pool to keep lots
        # of idle connections around.
        # tends to do so in batches, so we need to allow the pool to keep
        # lots of idle connections around.
        pool = HTTPConnectionPool(self.reactor)
        pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5))
        pool.cachedConnectionTimeout = 2 * 60

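The nested `Reactor` class above is a thin proxy: `__getattr__` delegates every attribute to the real reactor except `nameResolver`, which returns the filtering resolver instead. A standalone sketch of the same pattern (names here are illustrative, not the Synapse code itself):

```python
class ProxyingReactor(object):
    """Delegate everything to a wrapped reactor, overriding nameResolver."""

    def __init__(self, real_reactor, name_resolver):
        self.__dict__["_real"] = real_reactor
        self.__dict__["_resolver"] = name_resolver

    def __getattr__(self, attr):
        # Only called for attributes not found by normal lookup, so _real
        # and _resolver (stored directly in __dict__) do not recurse here.
        if attr == "nameResolver":
            return self._resolver
        return getattr(self._real, attr)
```

Code asking this reactor for `callLater`, `connectTCP` and so on is transparently served by the real reactor; anything consulting `nameResolver` — which is what Twisted's hostname endpoints do on a reactor providing `IReactorPluggableNameResolver` — gets the blacklisting resolver, closing off DNS-rebinding tricks.
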
@@ -74,20 +228,35 @@ class SimpleHttpClient(object):
        # BrowserLikePolicyForHTTPS which will do regular cert validation
        # 'like a browser'
        self.agent = Agent(
            reactor,
            self.reactor,
            connectTimeout=15,
            contextFactory=hs.get_http_client_context_factory(),
            contextFactory=self.hs.get_http_client_context_factory(),
            pool=pool,
        )
        self.user_agent = hs.version_string
        self.clock = hs.get_clock()
        if hs.config.user_agent_suffix:
            self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)

        self.user_agent = self.user_agent.encode('ascii')
        if self._ip_blacklist:
            # If we have an IP blacklist, we then install the blacklisting Agent
            # which prevents direct access to IP addresses that are not caught
            # by the DNS resolution.
            self.agent = BlacklistingAgentWrapper(
                self.agent,
                self.reactor,
                ip_whitelist=self._ip_whitelist,
                ip_blacklist=self._ip_blacklist,
            )

    @defer.inlineCallbacks
    def request(self, method, uri, data=b'', headers=None):
        """
        Args:
            method (str): HTTP method to use.
            uri (str): URI to query.
            data (bytes): Data to send in the request body, if applicable.
            headers (t.w.http_headers.Headers): Request headers.

        Raises:
            SynapseError: If the IP is blacklisted.
        """
        # A small wrapper around self.agent.request() so we can easily attach
        # counters to it
        outgoing_requests_counter.labels(method).inc()
@@ -97,25 +266,34 @@ class SimpleHttpClient(object):

        try:
            request_deferred = treq.request(
                method, uri, agent=self.agent, data=data, headers=headers
                method,
                uri,
                agent=self.agent,
                data=data,
                headers=headers,
                **self._extra_treq_args
            )
            request_deferred = timeout_deferred(
                request_deferred, 60, self.hs.get_reactor(),
                request_deferred,
                60,
                self.hs.get_reactor(),
                cancelled_to_request_timed_out_error,
            )
            response = yield make_deferred_yieldable(request_deferred)

            incoming_responses_counter.labels(method, response.code).inc()
            logger.info(
                "Received response to %s %s: %s",
                method, redact_uri(uri), response.code
                "Received response to %s %s: %s", method, redact_uri(uri), response.code
            )
            defer.returnValue(response)
        except Exception as e:
            incoming_responses_counter.labels(method, "ERR").inc()
            logger.info(
                "Error sending request to %s %s: %s %s",
                method, redact_uri(uri), type(e).__name__, e.args[0]
                method,
                redact_uri(uri),
                type(e).__name__,
                e.args[0],
            )
            raise

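The shape of the call above is: hand the request to `treq` against our custom agent, then wrap the returned Deferred so it fails if nothing comes back within 60 seconds. A minimal self-contained version of that idea, using a plain cancel-based timer in place of Synapse's `timeout_deferred` helper (an intentional simplification):

```python
import treq
from twisted.internet import reactor

def request_with_timeout(method, uri, timeout=60):
    d = treq.request(method, uri)

    # Cancel the request if it hasn't completed within `timeout` seconds.
    delayed_cancel = reactor.callLater(timeout, d.cancel)

    def _stop_timer(result):
        if delayed_cancel.active():
            delayed_cancel.cancel()
        return result

    d.addBoth(_stop_timer)
    return d
```
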
@@ -140,8 +318,9 @@ class SimpleHttpClient(object):
        # TODO: Do we ever want to log message contents?
        logger.debug("post_urlencoded_get_json args: %s", args)

        query_bytes = urllib.parse.urlencode(
            encode_urlencode_args(args), True).encode("utf8")
        query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True).encode(
            "utf8"
        )

        actual_headers = {
            b"Content-Type": [b"application/x-www-form-urlencoded"],
@@ -151,15 +330,13 @@ class SimpleHttpClient(object):
        actual_headers.update(headers)

        response = yield self.request(
            "POST",
            uri,
            headers=Headers(actual_headers),
            data=query_bytes
            "POST", uri, headers=Headers(actual_headers), data=query_bytes
        )

        body = yield make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            body = yield make_deferred_yieldable(treq.json_content(response))
            defer.returnValue(body)
            defer.returnValue(json.loads(body))
        else:
            raise HttpResponseException(response.code, response.phrase, body)

@@ -193,10 +370,7 @@ class SimpleHttpClient(object):
        actual_headers.update(headers)

        response = yield self.request(
            "POST",
            uri,
            headers=Headers(actual_headers),
            data=json_str
            "POST", uri, headers=Headers(actual_headers), data=json_str
        )

        body = yield make_deferred_yieldable(readBody(response))
@@ -264,10 +438,7 @@ class SimpleHttpClient(object):
        actual_headers.update(headers)

        response = yield self.request(
            "PUT",
            uri,
            headers=Headers(actual_headers),
            data=json_str
            "PUT", uri, headers=Headers(actual_headers), data=json_str
        )

        body = yield make_deferred_yieldable(readBody(response))
@@ -299,17 +470,11 @@ class SimpleHttpClient(object):
        query_bytes = urllib.parse.urlencode(args, True)
        uri = "%s?%s" % (uri, query_bytes)

        actual_headers = {
            b"User-Agent": [self.user_agent],
        }
        actual_headers = {b"User-Agent": [self.user_agent]}
        if headers:
            actual_headers.update(headers)

        response = yield self.request(
            "GET",
            uri,
            headers=Headers(actual_headers),
        )
        response = yield self.request("GET", uri, headers=Headers(actual_headers))

        body = yield make_deferred_yieldable(readBody(response))

@@ -334,22 +499,18 @@ class SimpleHttpClient(object):
        headers, absolute URI of the response and HTTP response code.
        """

        actual_headers = {
            b"User-Agent": [self.user_agent],
        }
        actual_headers = {b"User-Agent": [self.user_agent]}
        if headers:
            actual_headers.update(headers)

        response = yield self.request(
            "GET",
            url,
            headers=Headers(actual_headers),
        )
        response = yield self.request("GET", url, headers=Headers(actual_headers))

        resp_headers = dict(response.headers.getAllRawHeaders())

        if (b'Content-Length' in resp_headers and
                int(resp_headers[b'Content-Length']) > max_size):
        if (
            b'Content-Length' in resp_headers
            and int(resp_headers[b'Content-Length'][0]) > max_size
        ):
            logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
            raise SynapseError(
                502,
@@ -359,26 +520,20 @@ class SimpleHttpClient(object):

        if response.code > 299:
            logger.warn("Got %d when downloading %s" % (response.code, url))
            raise SynapseError(
                502,
                "Got error %d" % (response.code,),
                Codes.UNKNOWN,
            )
            raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

        # TODO: if our Content-Type is HTML or something, just read the first
        # N bytes into RAM rather than saving it all to disk only to read it
        # straight back in again

        try:
            length = yield make_deferred_yieldable(_readBodyToFile(
                response, output_stream, max_size,
            ))
            length = yield make_deferred_yieldable(
                _readBodyToFile(response, output_stream, max_size)
            )
        except Exception as e:
            logger.exception("Failed to download body")
            raise SynapseError(
                502,
                ("Failed to download remote body: %s" % e),
                Codes.UNKNOWN,
                502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN
            )

        defer.returnValue(
@@ -387,13 +542,14 @@ class SimpleHttpClient(object):
                resp_headers,
                response.request.absoluteURI.decode('ascii'),
                response.code,
            ),
        )
        )


# XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
# The two should be factored out.


class _ReadBodyToFileProtocol(protocol.Protocol):
    def __init__(self, stream, deferred, max_size):
        self.stream = stream
@@ -405,11 +561,13 @@ class _ReadBodyToFileProtocol(protocol.Protocol):
        self.stream.write(data)
        self.length += len(data)
        if self.max_size is not None and self.length >= self.max_size:
            self.deferred.errback(SynapseError(
                502,
                "Requested file is too large > %r bytes" % (self.max_size,),
                Codes.TOO_LARGE,
            ))
            self.deferred.errback(
                SynapseError(
                    502,
                    "Requested file is too large > %r bytes" % (self.max_size,),
                    Codes.TOO_LARGE,
                )
            )
            self.deferred = defer.Deferred()
            self.transport.loseConnection()

@@ -427,6 +585,7 @@ class _ReadBodyToFileProtocol(protocol.Protocol):
# XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
# The two should be factored out.


def _readBodyToFile(response, stream, max_size):
    d = defer.Deferred()
    response.deliverBody(_ReadBodyToFileProtocol(stream, d, max_size))
@@ -449,10 +608,12 @@ class CaptchaServerHttpClient(SimpleHttpClient):
            "POST",
            url,
            data=query_bytes,
            headers=Headers({
                b"Content-Type": [b"application/x-www-form-urlencoded"],
                b"User-Agent": [self.user_agent],
            })
            headers=Headers(
                {
                    b"Content-Type": [b"application/x-www-form-urlencoded"],
                    b"User-Agent": [self.user_agent],
                }
            ),
        )

        try:
@@ -463,57 +624,6 @@ class CaptchaServerHttpClient(SimpleHttpClient):
            defer.returnValue(e.response)


class SpiderEndpointFactory(object):
    def __init__(self, hs):
        self.blacklist = hs.config.url_preview_ip_range_blacklist
        self.whitelist = hs.config.url_preview_ip_range_whitelist
        self.policyForHTTPS = hs.get_http_client_context_factory()

    def endpointForURI(self, uri):
        logger.info("Getting endpoint for %s", uri.toBytes())

        if uri.scheme == b"http":
            endpoint_factory = HostnameEndpoint
        elif uri.scheme == b"https":
            tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port)

            def endpoint_factory(reactor, host, port, **kw):
                return wrapClientTLS(
                    tlsCreator,
                    HostnameEndpoint(reactor, host, port, **kw))
        else:
            logger.warn("Can't get endpoint for unrecognised scheme %s", uri.scheme)
            return None
        return SpiderEndpoint(
            reactor, uri.host, uri.port, self.blacklist, self.whitelist,
            endpoint=endpoint_factory, endpoint_kw_args=dict(timeout=15),
        )


class SpiderHttpClient(SimpleHttpClient):
    """
    Separate HTTP client for spidering arbitrary URLs.
    Special in that it follows redirects and has a UA that looks
    like a browser.

    Used by the preview_url endpoint in the content repo.
    """
    def __init__(self, hs):
        SimpleHttpClient.__init__(self, hs)
        # clobber the base class's agent and UA:
        self.agent = ContentDecoderAgent(
            BrowserLikeRedirectAgent(
                Agent.usingEndpointFactory(
                    reactor,
                    SpiderEndpointFactory(hs)
                )
            ), [(b'gzip', GzipDecoder)]
        )
        # We could look like Chrome:
        # self.user_agent = ("Mozilla/5.0 (%s) (KHTML, like Gecko)
        #   Chrome Safari" % hs.version_string)


def encode_urlencode_args(args):
    return {k: encode_urlencode_arg(v) for k, v in args.items()}


@@ -12,30 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import random
import re
import time

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError

logger = logging.getLogger(__name__)

SERVER_CACHE = {}

# our record of an individual server which can be tried to reach a destination.
#
# "host" is the hostname acquired from the SRV record. Except when there's
# no SRV record, in which case it is the original hostname.
_Server = collections.namedtuple(
    "_Server", "priority weight host port expires"
)


def parse_server_name(server_name):
    """Split a server name into host/port parts.
@@ -100,299 +81,3 @@ def parse_and_validate_server_name(server_name):
        ))

    return host, port


def matrix_federation_endpoint(reactor, destination, tls_client_options_factory=None,
                               timeout=None):
    """Construct an endpoint for the given matrix destination.

    Args:
        reactor: Twisted reactor.
        destination (unicode): The name of the server to connect to.
        tls_client_options_factory
            (synapse.crypto.context_factory.ClientTLSOptionsFactory):
            Factory which generates TLS options for client connections.
        timeout (int): connection timeout in seconds
    """

    domain, port = parse_server_name(destination)

    endpoint_kw_args = {}

    if timeout is not None:
        endpoint_kw_args.update(timeout=timeout)

    if tls_client_options_factory is None:
        transport_endpoint = HostnameEndpoint
        default_port = 8008
    else:
        # the SNI string should be the same as the Host header, minus the port.
        # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777,
        # the Host header and SNI should therefore be the server_name of the remote
        # server.
        tls_options = tls_client_options_factory.get_options(domain)

        def transport_endpoint(reactor, host, port, timeout):
            return wrapClientTLS(
                tls_options,
                HostnameEndpoint(reactor, host, port, timeout=timeout),
            )
        default_port = 8448

    if port is None:
        return _WrappingEndpointFac(SRVClientEndpoint(
            reactor, "matrix", domain, protocol="tcp",
            default_port=default_port, endpoint=transport_endpoint,
            endpoint_kw_args=endpoint_kw_args
        ), reactor)
    else:
        return _WrappingEndpointFac(transport_endpoint(
            reactor, domain, port, **endpoint_kw_args
        ), reactor)


class _WrappingEndpointFac(object):
    def __init__(self, endpoint_fac, reactor):
        self.endpoint_fac = endpoint_fac
        self.reactor = reactor

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        conn = yield self.endpoint_fac.connect(protocolFactory)
        conn = _WrappedConnection(conn, self.reactor)
        defer.returnValue(conn)


class _WrappedConnection(object):
    """Wraps a connection and calls abort on it if it hasn't seen any action
    for 2.5-3 minutes.
    """
    __slots__ = ["conn", "last_request"]

    def __init__(self, conn, reactor):
        object.__setattr__(self, "conn", conn)
        object.__setattr__(self, "last_request", time.time())
        self._reactor = reactor

    def __getattr__(self, name):
        return getattr(self.conn, name)

    def __setattr__(self, name, value):
        setattr(self.conn, name, value)

    def _time_things_out_maybe(self):
        # We use a slightly shorter timeout here just in case the callLater is
        # triggered early. Paranoia ftw.
        # TODO: Cancel the previous callLater rather than comparing time.time()?
        if time.time() - self.last_request >= 2.5 * 60:
            self.abort()
            # Abort the underlying TLS connection. The abort() method calls
            # loseConnection() on the TLS connection which tries to
            # shutdown the connection cleanly. We call abortConnection()
            # since that will promptly close the TLS connection.
            #
            # In Twisted >18.4; the TLS connection will be None if it has closed
            # which will make abortConnection() throw. Check that the TLS connection
            # is not None before trying to close it.
            if self.transport.getHandle() is not None:
                self.transport.abortConnection()

    def request(self, request):
        self.last_request = time.time()

        # Time this connection out if we haven't sent a request in the last
        # N minutes
        # TODO: Cancel the previous callLater?
        self._reactor.callLater(3 * 60, self._time_things_out_maybe)

        d = self.conn.request(request)

        def update_request_time(res):
            self.last_request = time.time()
            # TODO: Cancel the previous callLater?
            self._reactor.callLater(3 * 60, self._time_things_out_maybe)
            return res

        d.addCallback(update_request_time)

        return d


class SpiderEndpoint(object):
    """An endpoint which refuses to connect to blacklisted IP addresses
    Implements twisted.internet.interfaces.IStreamClientEndpoint.
    """
    def __init__(self, reactor, host, port, blacklist, whitelist,
                 endpoint=HostnameEndpoint, endpoint_kw_args={}):
        self.reactor = reactor
        self.host = host
        self.port = port
        self.blacklist = blacklist
        self.whitelist = whitelist
        self.endpoint = endpoint
        self.endpoint_kw_args = endpoint_kw_args

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        address = yield self.reactor.resolve(self.host)

        from netaddr import IPAddress
        ip_address = IPAddress(address)

        if ip_address in self.blacklist:
            if self.whitelist is None or ip_address not in self.whitelist:
                raise ConnectError(
                    "Refusing to spider blacklisted IP address %s" % address
                )

        logger.info("Connecting to %s:%s", address, self.port)
        endpoint = self.endpoint(
            self.reactor, address, self.port, **self.endpoint_kw_args
        )
        connection = yield endpoint.connect(protocolFactory)
        defer.returnValue(connection)


class SRVClientEndpoint(object):
    """An endpoint which looks up SRV records for a service.
    Cycles through the list of servers starting with each call to connect
    picking the next server.
    Implements twisted.internet.interfaces.IStreamClientEndpoint.
    """

    def __init__(self, reactor, service, domain, protocol="tcp",
                 default_port=None, endpoint=HostnameEndpoint,
                 endpoint_kw_args={}):
        self.reactor = reactor
        self.service_name = "_%s._%s.%s" % (service, protocol, domain)

        if default_port is not None:
            self.default_server = _Server(
                host=domain,
                port=default_port,
                priority=0,
                weight=0,
                expires=0,
            )
        else:
            self.default_server = None

        self.endpoint = endpoint
        self.endpoint_kw_args = endpoint_kw_args

        self.servers = None
        self.used_servers = None

    @defer.inlineCallbacks
    def fetch_servers(self):
        self.used_servers = []
        self.servers = yield resolve_service(self.service_name)

    def pick_server(self):
        if not self.servers:
            if self.used_servers:
                self.servers = self.used_servers
                self.used_servers = []
                self.servers.sort()
            elif self.default_server:
                return self.default_server
            else:
                raise ConnectError(
                    "No server available for %s" % self.service_name
                )

        # look for all servers with the same priority
        min_priority = self.servers[0].priority
        weight_indexes = list(
            (index, server.weight + 1)
            for index, server in enumerate(self.servers)
            if server.priority == min_priority
        )

        total_weight = sum(weight for index, weight in weight_indexes)
        target_weight = random.randint(0, total_weight)
        for index, weight in weight_indexes:
            target_weight -= weight
            if target_weight <= 0:
                server = self.servers[index]
                # XXX: this looks totally dubious:
                #
                # (a) we never reuse a server until we have been through
                #     all of the servers at the same priority, so if the
                #     weights are A: 100, B:1, we always do ABABAB instead of
                #     AAAA...AAAB (approximately).
                #
                # (b) After using all the servers at the lowest priority,
                #     we move onto the next priority. We should only use the
                #     second priority if servers at the top priority are
                #     unreachable.
                #
                del self.servers[index]
                self.used_servers.append(server)
                return server

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        if self.servers is None:
            yield self.fetch_servers()
        server = self.pick_server()
        logger.info("Connecting to %s:%s", server.host, server.port)
        endpoint = self.endpoint(
            self.reactor, server.host, server.port, **self.endpoint_kw_args
        )
        connection = yield endpoint.connect(protocolFactory)
        defer.returnValue(connection)


@defer.inlineCallbacks
def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=time):
    cache_entry = cache.get(service_name, None)
    if cache_entry:
        if all(s.expires > int(clock.time()) for s in cache_entry):
            servers = list(cache_entry)
            defer.returnValue(servers)

    servers = []

    try:
        try:
            answers, _, _ = yield dns_client.lookupService(service_name)
        except DNSNameError:
            defer.returnValue([])

        if (len(answers) == 1
                and answers[0].type == dns.SRV
                and answers[0].payload
                and answers[0].payload.target == dns.Name(b'.')):
            raise ConnectError("Service %s unavailable" % service_name)

        for answer in answers:
            if answer.type != dns.SRV or not answer.payload:
                continue

            payload = answer.payload

            servers.append(_Server(
                host=str(payload.target),
                port=int(payload.port),
                priority=int(payload.priority),
                weight=int(payload.weight),
                expires=int(clock.time()) + answer.ttl,
            ))

        servers.sort()
        cache[service_name] = list(servers)
    except DomainError as e:
        # We failed to resolve the name (other than a NameError)
        # Try something in the cache, else re-raise
        cache_entry = cache.get(service_name, None)
        if cache_entry:
            logger.warn(
                "Failed to resolve %r, falling back to cache. %r",
                service_name, e
            )
            servers = list(cache_entry)
        else:
            raise e

    defer.returnValue(servers)

synapse/http/federation/__init__.py (new file)
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
synapse/http/federation/matrix_federation_agent.py (new file)
@@ -0,0 +1,424 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import time

import attr
from netaddr import IPAddress
from zope.interface import implementer

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent

from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.logcontext import make_deferred_yieldable

# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600

# jitter to add to the .well-known default cache ttl
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60

# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600

# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600


logger = logging.getLogger(__name__)
well_known_cache = TTLCache('well-known')


@implementer(IAgent)
class MatrixFederationAgent(object):
    """An Agent-like thing which provides a `request` method which will look up a matrix
    server and send an HTTP request to it.

    Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)

    Args:
        reactor (IReactor): twisted reactor to use for underlying requests

        tls_client_options_factory (ClientTLSOptionsFactory|None):
            factory to use for fetching client tls options, or none to disable TLS.

        _well_known_tls_policy (IPolicyForHTTPS|None):
            TLS policy to use for fetching .well-known files. None to use a default
            (browser-like) implementation.

        srv_resolver (SrvResolver|None):
            SRVResolver impl to use for looking up SRV records. None to use a default
            implementation.
    """

    def __init__(
        self, reactor, tls_client_options_factory,
        _well_known_tls_policy=None,
        _srv_resolver=None,
        _well_known_cache=well_known_cache,
    ):
        self._reactor = reactor
        self._tls_client_options_factory = tls_client_options_factory
        if _srv_resolver is None:
            _srv_resolver = SrvResolver()
        self._srv_resolver = _srv_resolver

        self._pool = HTTPConnectionPool(reactor)
        self._pool.retryAutomatically = False
        self._pool.maxPersistentPerHost = 5
        self._pool.cachedConnectionTimeout = 2 * 60

        agent_args = {}
        if _well_known_tls_policy is not None:
            # the param is called 'contextFactory', but actually passing a
            # contextfactory is deprecated, and it expects an IPolicyForHTTPS.
            agent_args['contextFactory'] = _well_known_tls_policy
        _well_known_agent = RedirectAgent(
            Agent(self._reactor, pool=self._pool, **agent_args),
        )
        self._well_known_agent = _well_known_agent

        self._well_known_cache = _well_known_cache

    @defer.inlineCallbacks
    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Args:
            method (bytes): HTTP method: GET/POST/etc

            uri (bytes): Absolute URI to be retrieved

            headers (twisted.web.http_headers.Headers|None):
                HTTP headers to send with the request, or None to
                send no extra headers.

            bodyProducer (twisted.web.iweb.IBodyProducer|None):
                An object which can generate bytes to make up the
                body of this request (for example, the properly encoded contents of
                a file for a file upload). Or None if the request is to have
                no body.

        Returns:
            Deferred[twisted.web.iweb.IResponse]:
                fires when the header of the response has been received (regardless of the
                response status code). Fails if there is any problem which prevents that
                response from being received (including problems that prevent the request
                from being sent).
        """
        parsed_uri = URI.fromBytes(uri, defaultPort=-1)
        res = yield self._route_matrix_uri(parsed_uri)

        # set up the TLS connection params
        #
        # XXX disabling TLS is really only supported here for the benefit of the
        # unit tests. We should make the UTs cope with TLS rather than having to make
        # the code support the unit tests.
        if self._tls_client_options_factory is None:
            tls_options = None
        else:
            tls_options = self._tls_client_options_factory.get_options(
                res.tls_server_name.decode("ascii")
            )

        # make sure that the Host header is set correctly
        if headers is None:
            headers = Headers()
        else:
            headers = headers.copy()

        if not headers.hasHeader(b'host'):
            headers.addRawHeader(b'host', res.host_header)

        class EndpointFactory(object):
            @staticmethod
            def endpointForURI(_uri):
                logger.info(
                    "Connecting to %s:%i",
                    res.target_host.decode("ascii"),
                    res.target_port,
                )
                ep = HostnameEndpoint(self._reactor, res.target_host, res.target_port)
                if tls_options is not None:
                    ep = wrapClientTLS(tls_options, ep)
                return ep

        agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
        res = yield make_deferred_yieldable(
            agent.request(method, uri, headers, bodyProducer)
        )
        defer.returnValue(res)

    @defer.inlineCallbacks
    def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
        """Helper for `request`: determine the routing for a Matrix URI

        Args:
            parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
                parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
                if there is no explicit port given.

            lookup_well_known (bool): True if we should look up the .well-known file if
                there is no SRV record.

        Returns:
            Deferred[_RoutingResult]
        """
        # check for an IP literal
        try:
            ip_address = IPAddress(parsed_uri.host.decode("ascii"))
        except Exception:
            # not an IP address
            ip_address = None

        if ip_address:
            port = parsed_uri.port
            if port == -1:
                port = 8448
            defer.returnValue(_RoutingResult(
                host_header=parsed_uri.netloc,
                tls_server_name=parsed_uri.host,
                target_host=parsed_uri.host,
                target_port=port,
            ))

        if parsed_uri.port != -1:
            # there is an explicit port
            defer.returnValue(_RoutingResult(
                host_header=parsed_uri.netloc,
                tls_server_name=parsed_uri.host,
                target_host=parsed_uri.host,
                target_port=parsed_uri.port,
            ))

        # try a SRV lookup
        service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
        server_list = yield self._srv_resolver.resolve_service(service_name)

        if not server_list and lookup_well_known:
            # try a .well-known lookup
            well_known_server = yield self._get_well_known(parsed_uri.host)

            if well_known_server:
                # if we found a .well-known, start again, but don't do another
                # .well-known lookup.

                # parse the server name in the .well-known response into host/port.
                # (This code is lifted from twisted.web.client.URI.fromBytes).
                if b':' in well_known_server:
                    well_known_host, well_known_port = well_known_server.rsplit(b':', 1)
                    try:
                        well_known_port = int(well_known_port)
                    except ValueError:
                        # the part after the colon could not be parsed as an int
                        # - we assume it is an IPv6 literal with no port (the closing
                        # ']' stops it being parsed as an int)
                        well_known_host, well_known_port = well_known_server, -1
                else:
                    well_known_host, well_known_port = well_known_server, -1

                new_uri = URI(
                    scheme=parsed_uri.scheme,
                    netloc=well_known_server,
                    host=well_known_host,
                    port=well_known_port,
                    path=parsed_uri.path,
                    params=parsed_uri.params,
                    query=parsed_uri.query,
                    fragment=parsed_uri.fragment,
                )

                res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
                defer.returnValue(res)

        if not server_list:
            target_host = parsed_uri.host
            port = 8448
            logger.debug(
                "No SRV record for %s, using %s:%i",
                parsed_uri.host.decode("ascii"), target_host.decode("ascii"), port,
            )
        else:
            target_host, port = pick_server_from_list(server_list)
            logger.debug(
                "Picked %s:%i from SRV records for %s",
                target_host.decode("ascii"), port, parsed_uri.host.decode("ascii"),
            )

        defer.returnValue(_RoutingResult(
            host_header=parsed_uri.netloc,
            tls_server_name=parsed_uri.host,
            target_host=target_host,
            target_port=port,
        ))

    @defer.inlineCallbacks
    def _get_well_known(self, server_name):
        """Attempt to fetch and parse a .well-known file for the given server

        Args:
            server_name (bytes): name of the server, from the requested url

        Returns:
            Deferred[bytes|None]: either the new server name, from the .well-known, or
                None if there was no .well-known file.
        """
        try:
            cached = self._well_known_cache[server_name]
            defer.returnValue(cached)
        except KeyError:
            pass

        # TODO: should we linearise so that we don't end up doing two .well-known requests
        # for the same server in parallel?

        uri = b"https://%s/.well-known/matrix/server" % (server_name, )
        uri_str = uri.decode("ascii")
        logger.info("Fetching %s", uri_str)
        try:
            response = yield make_deferred_yieldable(
                self._well_known_agent.request(b"GET", uri),
            )
            body = yield make_deferred_yieldable(readBody(response))
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code, ))
        except Exception as e:
            logger.info("Error fetching %s: %s", uri_str, e)

            # add some randomness to the TTL to avoid a stampeding herd every hour
            # after startup
            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)

            self._well_known_cache.set(server_name, None, cache_period)
            defer.returnValue(None)

        try:
            parsed_body = json.loads(body.decode('utf-8'))
            logger.info("Response from .well-known: %s", parsed_body)
            if not isinstance(parsed_body, dict):
                raise Exception("not a dict")
            if "m.server" not in parsed_body:
                raise Exception("Missing key 'm.server'")
        except Exception as e:
            raise Exception("invalid .well-known response from %s: %s" % (uri_str, e,))

        result = parsed_body["m.server"].encode("ascii")

        cache_period = _cache_period_from_headers(
            response.headers,
            time_now=self._reactor.seconds,
        )
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
        else:
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)

        if cache_period > 0:
            self._well_known_cache.set(server_name, result, cache_period)

        defer.returnValue(result)


def _cache_period_from_headers(headers, time_now=time.time):
    cache_controls = _parse_cache_control(headers)

    if b'no-store' in cache_controls:
        return 0

    if b'max-age' in cache_controls:
        try:
            max_age = int(cache_controls[b'max-age'])
            return max_age
        except ValueError:
            pass

    expires = headers.getRawHeaders(b'expires')
    if expires is not None:
        try:
            expires_date = stringToDatetime(expires[-1])
            return expires_date - time_now()
        except ValueError:
            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
            # especially the value "0", as representing a time in the past (i.e.,
            # "already expired").
            return 0

    return None


def _parse_cache_control(headers):
    cache_controls = {}
    for hdr in headers.getRawHeaders(b'cache-control', []):
        for directive in hdr.split(b','):
            splits = [x.strip() for x in directive.split(b'=', 1)]
            k = splits[0].lower()
            v = splits[1] if len(splits) > 1 else None
            cache_controls[k] = v
    return cache_controls

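Together these two helpers implement a small slice of RFC 7234: `no-store` wins outright, then `max-age`, then the `Expires` header. A quick illustration of what they compute (header values are examples):

```python
from twisted.web.http_headers import Headers

headers = Headers({b"Cache-Control": [b"max-age=3600, public"]})
# _parse_cache_control(headers)       -> {b'max-age': b'3600', b'public': None}
# _cache_period_from_headers(headers) -> 3600

headers = Headers({b"Cache-Control": [b"no-store"]})
# _cache_period_from_headers(headers) -> 0 (the .well-known result is not cached)
```
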
@attr.s
class _RoutingResult(object):
    """The result returned by `_route_matrix_uri`.

    Contains the parameters needed to direct a federation connection to a particular
    server.

    Where a SRV record points to several servers, this object contains a single server
    chosen from the list.
    """

    host_header = attr.ib()
    """
    The value we should assign to the Host header (host:port from the matrix
    URI, or .well-known).

    :type: bytes
    """

    tls_server_name = attr.ib()
    """
    The server name we should set in the SNI (typically host, without port, from the
    matrix URI or .well-known)

    :type: bytes
    """

    target_host = attr.ib()
    """
    The hostname (or IP literal) we should route the TCP connection to (the target of the
    SRV record, or the hostname from the URL/.well-known)

    :type: bytes
    """

    target_port = attr.ib()
    """
    The port we should route the TCP connection to (the target of the SRV record, or
    the port from the URL/.well-known, or 8448)

    :type: int
    """
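To summarise the routing rules this file implements: an IP literal or an explicit port in the URI is used as given (an IP literal with no port defaults to 8448); otherwise the `_matrix._tcp` SRV record is consulted, then `/.well-known/matrix/server`, and finally `hostname:8448` as the fallback. Illustrative expectations (hostnames are made up; results shown as comments):

```python
# _route_matrix_uri(URI.fromBytes(b"matrix://10.1.2.3", defaultPort=-1))
#   -> connect to 10.1.2.3:8448; no SRV or .well-known lookup
# _route_matrix_uri(URI.fromBytes(b"matrix://example.com:443", defaultPort=-1))
#   -> connect to example.com:443; an explicit port short-circuits discovery
# _route_matrix_uri(URI.fromBytes(b"matrix://example.com", defaultPort=-1))
#   -> SRV lookup for _matrix._tcp.example.com, then .well-known,
#      then example.com:8448 as the last resort
```
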
synapse/http/federation/srv_resolver.py (new file)
@@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import random
import time

import attr

from twisted.internet import defer
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError

from synapse.util.logcontext import make_deferred_yieldable

logger = logging.getLogger(__name__)

SERVER_CACHE = {}


@attr.s
class Server(object):
    """
    Our record of an individual server which can be tried to reach a destination.

    Attributes:
        host (bytes): target hostname
        port (int):
        priority (int):
        weight (int):
        expires (int): when the cache should expire this record - in *seconds* since
            the epoch
    """
    host = attr.ib()
    port = attr.ib()
    priority = attr.ib(default=0)
    weight = attr.ib(default=0)
    expires = attr.ib(default=0)


def pick_server_from_list(server_list):
    """Randomly choose a server from the server list

    Args:
        server_list (list[Server]): list of candidate servers

    Returns:
        Tuple[bytes, int]: (host, port) pair for the chosen server
    """
    if not server_list:
        raise RuntimeError("pick_server_from_list called with empty list")

    # TODO: currently we only use the lowest-priority servers. We should maintain a
    # cache of servers known to be "down" and filter them out

    min_priority = min(s.priority for s in server_list)
    eligible_servers = list(s for s in server_list if s.priority == min_priority)
    total_weight = sum(s.weight for s in eligible_servers)
    target_weight = random.randint(0, total_weight)

    for s in eligible_servers:
        target_weight -= s.weight

        if target_weight <= 0:
            return s.host, s.port

    # this should be impossible.
    raise RuntimeError(
        "pick_server_from_list got to end of eligible server list.",
    )


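The weighted pick follows the SRV semantics of RFC 2782: only servers sharing the lowest priority value are eligible, and each is chosen with probability roughly proportional to its weight. A worked example using the `Server` class above (hostnames are made up):

```python
servers = [
    Server(host=b"fast.example.com", port=8448, priority=0, weight=90),
    Server(host=b"slow.example.com", port=8448, priority=0, weight=10),
    Server(host=b"backup.example.com", port=8448, priority=10, weight=100),
]

# backup.example.com is never chosen while the priority-0 servers are listed;
# fast/slow are picked in roughly a 90:10 ratio over many calls.
host, port = pick_server_from_list(servers)
```
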
class SrvResolver(object):
    """Interface to the dns client to do SRV lookups, with result caching.

    The default resolver in twisted.names doesn't do any caching (it has a CacheResolver,
    but the cache never gets populated), so we add our own caching layer here.

    Args:
        dns_client (twisted.internet.interfaces.IResolver): twisted resolver impl
        cache (dict): cache object
        get_time (callable): clock implementation. Should return seconds since the epoch
    """
    def __init__(self, dns_client=client, cache=SERVER_CACHE, get_time=time.time):
        self._dns_client = dns_client
        self._cache = cache
        self._get_time = get_time

    @defer.inlineCallbacks
    def resolve_service(self, service_name):
        """Look up a SRV record

        Args:
            service_name (bytes): record to look up

        Returns:
            Deferred[list[Server]]:
                a list of the SRV records, or an empty list if none found
        """
        now = int(self._get_time())

        if not isinstance(service_name, bytes):
            raise TypeError("%r is not a byte string" % (service_name,))

        cache_entry = self._cache.get(service_name, None)
        if cache_entry:
            if all(s.expires > now for s in cache_entry):
                servers = list(cache_entry)
                defer.returnValue(servers)

        try:
            answers, _, _ = yield make_deferred_yieldable(
                self._dns_client.lookupService(service_name),
            )
        except DNSNameError:
            # TODO: cache this. We can get the SOA out of the exception, and use
            # the negative-TTL value.
            defer.returnValue([])
        except DomainError as e:
            # We failed to resolve the name (other than a NameError)
            # Try something in the cache, else re-raise
            cache_entry = self._cache.get(service_name, None)
            if cache_entry:
                logger.warn(
                    "Failed to resolve %r, falling back to cache. %r",
                    service_name, e
                )
                defer.returnValue(list(cache_entry))
            else:
                raise e

        if (len(answers) == 1
                and answers[0].type == dns.SRV
                and answers[0].payload
                and answers[0].payload.target == dns.Name(b'.')):
            raise ConnectError("Service %s unavailable" % service_name)

        servers = []

        for answer in answers:
            if answer.type != dns.SRV or not answer.payload:
                continue

            payload = answer.payload

            servers.append(Server(
                host=payload.target.name,
                port=payload.port,
                priority=payload.priority,
                weight=payload.weight,
                expires=now + answer.ttl,
            ))

        self._cache[service_name] = list(servers)
        defer.returnValue(servers)
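A sketch of how the resolver is used from `inlineCallbacks` code (the service name is an example):

```python
from twisted.internet import defer

@defer.inlineCallbacks
def lookup_matrix_server(resolver):
    # Served from SrvResolver's cache while the records' TTLs are valid.
    servers = yield resolver.resolve_service(b"_matrix._tcp.example.com")
    if servers:
        defer.returnValue(pick_server_from_list(servers))
    defer.returnValue(None)
```
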
@@ -19,7 +19,7 @@ import random
import sys
from io import BytesIO

from six import PY3, string_types
from six import PY3, raise_from, string_types
from six.moves import urllib

import attr
@@ -32,7 +32,7 @@ from twisted.internet import defer, protocol
from twisted.internet.error import DNSLookupError
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
from twisted.web.client import FileBodyProducer
from twisted.web.http_headers import Headers

import synapse.metrics
@@ -41,9 +41,10 @@ from synapse.api.errors import (
    Codes,
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.util.async_helpers import timeout_deferred
from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.metrics import Measure
@@ -65,20 +66,6 @@ else:
    MAXINT = sys.maxint


class MatrixFederationEndpointFactory(object):
    def __init__(self, hs):
        self.reactor = hs.get_reactor()
        self.tls_client_options_factory = hs.tls_client_options_factory

    def endpointForURI(self, uri):
        destination = uri.netloc.decode('ascii')

        return matrix_federation_endpoint(
            self.reactor, destination, timeout=10,
            tls_client_options_factory=self.tls_client_options_factory
        )


_next_id = 1


@@ -186,12 +173,10 @@ class MatrixFederationHttpClient(object):
        self.signing_key = hs.config.signing_key[0]
        self.server_name = hs.hostname
        reactor = hs.get_reactor()
        pool = HTTPConnectionPool(reactor)
        pool.retryAutomatically = False
        pool.maxPersistentPerHost = 5
        pool.cachedConnectionTimeout = 2 * 60
        self.agent = Agent.usingEndpointFactory(
            reactor, MatrixFederationEndpointFactory(hs), pool=pool

        self.agent = MatrixFederationAgent(
            hs.get_reactor(),
            hs.tls_client_options_factory,
        )
        self.clock = hs.get_clock()
        self._store = hs.get_datastore()
@@ -228,19 +213,18 @@ class MatrixFederationHttpClient(object):
            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred: resolves with the http response object on success.
            Deferred[twisted.web.client.Response]: resolves with the HTTP
            response object on success.

            Fails with ``HttpResponseException``: if we get an HTTP response
            code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
            to retry this server.

            Fails with ``FederationDeniedError`` if this destination
            is not on our federation whitelist

            (May also fail with plenty of other Exceptions for things like DNS
            failures, connection failures, SSL failures.)
        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
        """
        if timeout:
            _sec_timeout = timeout / 1000

@@ -271,7 +255,6 @@ class MatrixFederationHttpClient(object):

        headers_dict = {
            b"User-Agent": [self.version_string_bytes],
            b"Host": [destination_bytes],
        }

        with limiter:
@@ -298,9 +281,9 @@ class MatrixFederationHttpClient(object):
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    self.sign_request(
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes,
                        headers_dict, json,
                        json,
                    )
                    data = encode_canonical_json(json)
                    producer = FileBodyProducer(
@@ -309,49 +292,100 @@ class MatrixFederationHttpClient(object):
                    )
                else:
                    producer = None
                    self.sign_request(
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes,
                        headers_dict,
                    )

                headers_dict[b"Authorization"] = auth_headers

                logger.info(
                    "{%s} [%s] Sending request: %s %s",
                    "{%s} [%s] Sending request: %s %s; timeout %fs",
                    request.txn_id, request.destination, request.method,
                    url_str,
                    url_str, _sec_timeout,
                )

                # we don't want all the fancy cookie and redirect handling that
                # treq.request gives: just use the raw Agent.
                request_deferred = self.agent.request(
                    method_bytes,
                    url_bytes,
                    headers=Headers(headers_dict),
                    bodyProducer=producer,
                try:
                    with Measure(self.clock, "outbound_request"):
                        # we don't want all the fancy cookie and redirect handling
                        # that treq.request gives: just use the raw Agent.
                        request_deferred = self.agent.request(
                            method_bytes,
                            url_bytes,
                            headers=Headers(headers_dict),
                            bodyProducer=producer,
                        )

                        request_deferred = timeout_deferred(
                            request_deferred,
                            timeout=_sec_timeout,
                            reactor=self.hs.get_reactor(),
                        )

                        response = yield request_deferred
                except DNSLookupError as e:
                    raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                except Exception as e:
                    logger.info("Failed to send request: %s", e)
                    raise_from(RequestSendFailed(e, can_retry=True), e)

                logger.info(
                    "{%s} [%s] Got response headers: %d %s",
                    request.txn_id,
                    request.destination,
                    response.code,
                    response.phrase.decode('ascii', errors='replace'),
                )

                request_deferred = timeout_deferred(
                    request_deferred,
                    timeout=_sec_timeout,
                    reactor=self.hs.get_reactor(),
                )

                with Measure(self.clock, "outbound_request"):
                    response = yield make_deferred_yieldable(
                        request_deferred,
                if 200 <= response.code < 300:
                    pass
                else:
                    # :'(
                    # Update transactions table?
                    d = treq.content(response)
                    d = timeout_deferred(
                        d,
                        timeout=_sec_timeout,
                        reactor=self.hs.get_reactor(),
                    )

                    try:
                        body = yield make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warn(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    e = HttpResponseException(
                        response.code, response.phrase, body
                    )

                    # Retry if the error is a 429 (Too Many Requests),
                    # otherwise just raise a standard HttpResponseException
                    if response.code == 429:
                        raise_from(RequestSendFailed(e, can_retry=True), e)
                    else:
                        raise e

                break
            except Exception as e:
            except RequestSendFailed as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                    _flatten_response_never_received(e.inner_exception),
                )

                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                if not e.can_retry:
                    raise

                if retries_left and not timeout:
@@ -376,50 +410,36 @@ class MatrixFederationHttpClient(object):
                else:
                    raise

                logger.info(
                    "{%s} [%s] Got response headers: %d %s",
                    request.txn_id,
                    request.destination,
                    response.code,
                    response.phrase.decode('ascii', errors='replace'),
                )

                if 200 <= response.code < 300:
                    pass
                else:
                    # :'(
                    # Update transactions table?
                    d = treq.content(response)
                    d = timeout_deferred(
                        d,
                        timeout=_sec_timeout,
                        reactor=self.hs.get_reactor(),
                    )
                    body = yield make_deferred_yieldable(d)
                    raise HttpResponseException(
                        response.code, response.phrase, body
                    )
            except Exception as e:
                logger.warn(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise

        defer.returnValue(response)

    def sign_request(self, destination, method, url_bytes, headers_dict,
                     content=None, destination_is=None):
    def build_auth_headers(
        self, destination, method, url_bytes, content=None, destination_is=None,
    ):
        """
        Signs a request by adding an Authorization header to headers_dict
        Builds the Authorization headers for a federation request
        Args:
            destination (bytes|None): The destination home server of the request.
                May be None if the destination is an identity server, in which case
                destination_is must be non-None.
            method (bytes): The HTTP method of the request
            url_bytes (bytes): The URI path of the request
            headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to
                append to
            content (object): The body of the request
            destination_is (bytes): As 'destination', but if the destination is an
                identity server

        Returns:
            None
            list[bytes]: a list of headers to be added as "Authorization:" headers
        """
        request = {
            "method": method,
@@ -446,8 +466,7 @@ class MatrixFederationHttpClient(object):
                self.server_name, key, sig,
            )).encode('ascii')
        )

        headers_dict[b"Authorization"] = auth_headers
        return auth_headers

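The headers returned by `build_auth_headers` carry the request signature in the `X-Matrix` scheme from the Matrix server-server spec: the method, URI, origin, destination and (if present) JSON body are assembled into a canonical JSON object and signed with the homeserver's ed25519 signing key. The resulting header looks roughly like this (values are illustrative):

```python
# Authorization: X-Matrix origin=origin.example.com,key="ed25519:key1",sig="NaV6Am..."
#
# where `sig` is the (unpadded base64) signature over the canonical JSON of:
# {
#     "method": "GET",
#     "uri": "/_matrix/federation/v1/version",
#     "origin": "origin.example.com",
#     "destination": "remote.example.com",
#     "content": <the JSON body, if any>,
# }
```
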
@defer.inlineCallbacks
|
||||
def put_json(self, destination, path, args={}, data={},
|
||||
@@ -477,17 +496,18 @@ class MatrixFederationHttpClient(object):
            requests)

        Returns:
-           Deferred: Succeeds when we get a 2xx HTTP response. The result
-           will be the decoded JSON body.
+           Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+           result will be the decoded JSON body.

-           Fails with ``HttpResponseException`` if we get an HTTP response
-           code >= 300.
-
-           Fails with ``NotRetryingDestination`` if we are not yet ready
-           to retry this server.
-
-           Fails with ``FederationDeniedError`` if this destination
-           is not on our federation whitelist
+       Raises:
+           HttpResponseException: If we get an HTTP response code >= 300
+               (except 429).
+           NotRetryingDestination: If we are not yet ready to retry this
+               server.
+           FederationDeniedError: If this destination is not on our
+               federation whitelist
+           RequestSendFailed: If there were problems connecting to the
+               remote, due to e.g. DNS failures, connection timeouts etc.
        """

        request = MatrixFederationRequest(
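With the failure modes now listed under Raises:, a caller-side sketch may help. This is a hypothetical usage, not code from this diff — the function name, request path, and import locations are assumptions:

    import logging

    from twisted.internet import defer

    from synapse.api.errors import HttpResponseException, RequestSendFailed

    logger = logging.getLogger(__name__)


    @defer.inlineCallbacks
    def send_example(client, destination):
        # Hypothetical caller handling each documented failure mode of put_json.
        try:
            body = yield client.put_json(
                destination, "/_matrix/federation/v1/example", data={},
            )
        except HttpResponseException as e:
            # The remote answered, but with a non-2xx code (other than 429).
            logger.warning("Remote %s returned %d", destination, e.code)
            raise
        except RequestSendFailed as e:
            # No response at all: DNS failure, connection timeout, etc.
            if e.can_retry:
                pass  # e.g. requeue the transaction for a later attempt
            raise
        defer.returnValue(body)  # the decoded JSON body (dict or list)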
@@ -531,17 +551,18 @@ class MatrixFederationHttpClient(object):
                try the request anyway.
            args (dict): query params

        Returns:
-           Deferred: Succeeds when we get a 2xx HTTP response. The result
-           will be the decoded JSON body.
+           Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+           result will be the decoded JSON body.

-           Fails with ``HttpResponseException`` if we get an HTTP response
-           code >= 300.
-
-           Fails with ``NotRetryingDestination`` if we are not yet ready
-           to retry this server.
-
-           Fails with ``FederationDeniedError`` if this destination
-           is not on our federation whitelist
+       Raises:
+           HttpResponseException: If we get an HTTP response code >= 300
+               (except 429).
+           NotRetryingDestination: If we are not yet ready to retry this
+               server.
+           FederationDeniedError: If this destination is not on our
+               federation whitelist
+           RequestSendFailed: If there were problems connecting to the
+               remote, due to e.g. DNS failures, connection timeouts etc.
        """

        request = MatrixFederationRequest(
@@ -586,17 +607,18 @@ class MatrixFederationHttpClient(object):
            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.

        Returns:
-           Deferred: Succeeds when we get a 2xx HTTP response. The result
-           will be the decoded JSON body.
+           Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+           result will be the decoded JSON body.

-           Fails with ``HttpResponseException`` if we get an HTTP response
-           code >= 300.
-
-           Fails with ``NotRetryingDestination`` if we are not yet ready
-           to retry this server.
-
-           Fails with ``FederationDeniedError`` if this destination
-           is not on our federation whitelist
+       Raises:
+           HttpResponseException: If we get an HTTP response code >= 300
+               (except 429).
+           NotRetryingDestination: If we are not yet ready to retry this
+               server.
+           FederationDeniedError: If this destination is not on our
+               federation whitelist
+           RequestSendFailed: If there were problems connecting to the
+               remote, due to e.g. DNS failures, connection timeouts etc.
        """
        logger.debug("get_json args: %s", args)

@@ -637,17 +659,18 @@ class MatrixFederationHttpClient(object):
            ignore_backoff (bool): true to ignore the historical backoff data and
                try the request anyway.

        Returns:
-           Deferred: Succeeds when we get a 2xx HTTP response. The result
-           will be the decoded JSON body.
+           Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+           result will be the decoded JSON body.

-           Fails with ``HttpResponseException`` if we get an HTTP response
-           code >= 300.
-
-           Fails with ``NotRetryingDestination`` if we are not yet ready
-           to retry this server.
-
-           Fails with ``FederationDeniedError`` if this destination
-           is not on our federation whitelist
+       Raises:
+           HttpResponseException: If we get an HTTP response code >= 300
+               (except 429).
+           NotRetryingDestination: If we are not yet ready to retry this
+               server.
+           FederationDeniedError: If this destination is not on our
+               federation whitelist
+           RequestSendFailed: If there were problems connecting to the
+               remote, due to e.g. DNS failures, connection timeouts etc.
        """
        request = MatrixFederationRequest(
            method="DELETE",
@@ -680,18 +703,20 @@ class MatrixFederationHttpClient(object):
            args (dict): Optional dictionary used to create the query string.
            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.

        Returns:
-           Deferred: resolves with an (int,dict) tuple of the file length and
-           a dict of the response headers.
+           Deferred[tuple[int, dict]]: Resolves with an (int,dict) tuple of
+           the file length and a dict of the response headers.

-           Fails with ``HttpResponseException`` if we get an HTTP response code
-           >= 300
-
-           Fails with ``NotRetryingDestination`` if we are not yet ready
-           to retry this server.
-
-           Fails with ``FederationDeniedError`` if this destination
-           is not on our federation whitelist
+       Raises:
+           HttpResponseException: If we get an HTTP response code >= 300
+               (except 429).
+           NotRetryingDestination: If we are not yet ready to retry this
+               server.
+           FederationDeniedError: If this destination is not on our
+               federation whitelist
+           RequestSendFailed: If there were problems connecting to the
+               remote, due to e.g. DNS failures, connection timeouts etc.
        """
        request = MatrixFederationRequest(
            method="GET",
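get_file is the one method here whose result is not a JSON body. A brief hypothetical usage of its Deferred[tuple[int, dict]] result (the path, logger, and positional output_stream argument are assumptions for illustration):

    @defer.inlineCallbacks
    def download_example(client, destination, output_stream):
        # Resolves with the file length and a dict of the response headers.
        length, headers = yield client.get_file(
            destination, "/_matrix/media/v1/download/example", output_stream,
        )
        logger.info(
            "Fetched %d bytes (Content-Type: %s)",
            length, headers.get(b"Content-Type"),
        )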
@@ -784,21 +809,21 @@ def check_content_type_is_json(headers):
        headers (twisted.web.http_headers.Headers): headers to check

    Raises:
-       RuntimeError if the
+       RequestSendFailed: if the Content-Type header is missing or isn't JSON

    """
    c_type = headers.getRawHeaders(b"Content-Type")
    if c_type is None:
-       raise RuntimeError(
+       raise RequestSendFailed(RuntimeError(
            "No Content-Type header"
-       )
+       ), can_retry=False)

    c_type = c_type[0].decode('ascii')  # only the first header
    val, options = cgi.parse_header(c_type)
    if val != "application/json":
-       raise RuntimeError(
+       raise RequestSendFailed(RuntimeError(
            "Content-Type not application/json: was '%s'" % c_type
-       )
+       ), can_retry=False)


def encode_query_args(args):
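A quick sketch of the new behaviour of check_content_type_is_json, using Twisted's Headers (the text/html value is an arbitrary non-JSON example); the assertion reflects the can_retry=False set above:

    from twisted.web.http_headers import Headers

    try:
        check_content_type_is_json(Headers({b"Content-Type": [b"text/html"]}))
    except RequestSendFailed as e:
        # A wrong Content-Type is a permanent failure: never retried.
        assert not e.can_retry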